Diffstat (limited to 'drivers')
-rw-r--r-- drivers/accel/amdxdna/aie2_ctx.c | 29
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.c | 2
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.h | 3
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox.c | 17
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 67
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 5
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 7
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 9
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.c | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 7
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 210
-rw-r--r-- drivers/accel/ivpu/ivpu_job.h | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 112
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.c | 13
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 21
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_sysfs.c | 5
-rw-r--r-- drivers/accel/qaic/qaic_drv.c | 2
-rw-r--r-- drivers/acpi/Kconfig | 2
-rw-r--r-- drivers/acpi/acpi_pnp.c | 2
-rw-r--r-- drivers/acpi/ec.c | 28
-rw-r--r-- drivers/acpi/hed.c | 7
-rw-r--r-- drivers/acpi/pptt.c | 15
-rw-r--r-- drivers/android/binder.c | 18
-rw-r--r-- drivers/android/binder_internal.h | 8
-rw-r--r-- drivers/android/binderfs.c | 2
-rw-r--r-- drivers/ata/libata-scsi.c | 25
-rw-r--r-- drivers/auxdisplay/charlcd.c | 5
-rw-r--r-- drivers/auxdisplay/charlcd.h | 5
-rw-r--r-- drivers/auxdisplay/hd44780.c | 2
-rw-r--r-- drivers/auxdisplay/lcd2s.c | 2
-rw-r--r-- drivers/auxdisplay/panel.c | 2
-rw-r--r-- drivers/base/base.h | 17
-rw-r--r-- drivers/base/bus.c | 2
-rw-r--r-- drivers/base/core.c | 38
-rw-r--r-- drivers/base/cpu.c | 3
-rw-r--r-- drivers/base/dd.c | 7
-rw-r--r-- drivers/base/faux.c | 15
-rw-r--r-- drivers/base/module.c | 13
-rw-r--r-- drivers/base/power/main.c | 7
-rw-r--r-- drivers/block/loop.c | 62
-rw-r--r-- drivers/block/null_blk/main.c | 86
-rw-r--r-- drivers/block/ublk_drv.c | 783
-rw-r--r-- drivers/bluetooth/btintel_pcie.c | 57
-rw-r--r-- drivers/bluetooth/btmtk.c | 10
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 15
-rw-r--r-- drivers/bluetooth/btusb.c | 117
-rw-r--r-- drivers/bluetooth/hci_qca.c | 14
-rw-r--r-- drivers/char/misc.c | 2
-rw-r--r-- drivers/char/tpm/tpm-buf.c | 6
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 22
-rw-r--r-- drivers/char/tpm/tpm_tis_core.h | 2
-rw-r--r-- drivers/char/virtio_console.c | 7
-rw-r--r-- drivers/clk/clk-s2mps11.c | 3
-rw-r--r-- drivers/clk/clk.c | 4
-rw-r--r-- drivers/clk/imx/clk-imx8mp.c | 151
-rw-r--r-- drivers/clk/qcom/Kconfig | 2
-rw-r--r-- drivers/clk/qcom/camcc-sm8250.c | 56
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.c | 52
-rw-r--r-- drivers/clk/qcom/lpassaudiocc-sc7280.c | 23
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.c | 102
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.c | 12
-rw-r--r-- drivers/clk/samsung/clk-exynosautov920.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 44
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 36
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mp.h | 22
-rw-r--r-- drivers/clocksource/i8253.c | 4
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 6
-rw-r--r-- drivers/clocksource/timer-riscv.c | 6
-rw-r--r-- drivers/comedi/drivers/jr3_pci.c | 2
-rw-r--r-- drivers/cpufreq/Kconfig.arm | 20
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 16
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 1
-rw-r--r-- drivers/cpufreq/apple-soc-cpufreq.c | 10
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r-- drivers/cpufreq/cpufreq.c | 42
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 3
-rw-r--r-- drivers/cpufreq/freq_table.c | 10
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 3
-rw-r--r-- drivers/cpufreq/scmi-cpufreq.c | 10
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 13
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 18
-rw-r--r-- drivers/cpufreq/tegra186-cpufreq.c | 7
-rw-r--r-- drivers/cpuidle/governors/menu.c | 13
-rw-r--r-- drivers/crypto/atmel-sha204a.c | 6
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 1
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 7
-rw-r--r-- drivers/crypto/mxs-dcp.c | 8
-rw-r--r-- drivers/cxl/core/regs.c | 4
-rw-r--r-- drivers/dma-buf/dma-resv.c | 5
-rw-r--r-- drivers/dma-buf/udmabuf.c | 2
-rw-r--r-- drivers/dma/bcm2835-dma.c | 2
-rw-r--r-- drivers/dma/fsl-edma-main.c | 2
-rw-r--r-- drivers/dma/idxd/cdev.c | 13
-rw-r--r-- drivers/dma/idxd/init.c | 159
-rw-r--r-- drivers/dma/ti/k3-udma-glue.c | 15
-rw-r--r-- drivers/dma/ti/k3-udma.c | 10
-rw-r--r-- drivers/dpll/dpll_core.c | 5
-rw-r--r-- drivers/edac/altera_edac.c | 9
-rw-r--r-- drivers/edac/altera_edac.h | 2
-rw-r--r-- drivers/edac/ie31200_edac.c | 28
-rw-r--r-- drivers/firmware/arm_ffa/bus.c | 1
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 15
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 22
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 13
-rw-r--r-- drivers/firmware/cirrus/Kconfig | 5
-rw-r--r-- drivers/firmware/stratix10-svc.c | 14
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 6
-rw-r--r-- drivers/fpga/altera-cvp.c | 2
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 6
-rw-r--r-- drivers/gpio/gpio-virtuser.c | 12
-rw-r--r-- drivers/gpio/gpiolib-of.c | 6
-rw-r--r-- drivers/gpio/gpiolib.c | 3
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 102
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 172
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 42
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm | 126
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 59
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 54
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 453
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 124
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c | 69
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c | 41
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c | 71
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 43
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 11
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 137
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 91
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 56
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/dc_common.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 71
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 50
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 55
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 98
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 87
-rw-r--r-- drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c | 47
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h | 32
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 48
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 30
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 10
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 28
-rw-r--r-- drivers/gpu/drm/drm_buddy.c | 5
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 1
-rw-r--r-- drivers/gpu/drm/drm_file.c | 6
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 4
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 3
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h | 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dpi.c | 5
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.h | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_encoder_hdmi.c | 29
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.c | 195
-rw-r--r-- drivers/gpu/drm/meson/meson_vclk.h | 13
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 32
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 25
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 40
-rw-r--r-- drivers/gpu/drm/tests/drm_gem_shmem_test.c | 3
-rw-r--r-- drivers/gpu/drm/tiny/panel-mipi-dbi.c | 5
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c | 25
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 28
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c | 9
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.c | 3
-rw-r--r-- drivers/gpu/drm/xe/instructions/xe_gpu_commands.h | 1
-rw-r--r-- drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 4
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_gt_regs.h | 5
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_mocs.c | 7
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c | 22
-rw-r--r-- drivers/gpu/drm/xe/xe_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_device.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_drm_client.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_gen_wa_oob.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.c | 22
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc_proxy.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc_proxy.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c | 9
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.c | 20
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.c | 23
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle_types.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_pagefault.c | 11
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 49
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 66
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 22
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_types.h | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_capture.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_log.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_pc.c | 22
-rw-r--r-- drivers/gpu/drm/xe/xe_guc_relay.c | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_hw_engine.c | 18
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_lrc_types.h | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_mmio.c | 10
-rw-r--r-- drivers/gpu/drm/xe/xe_oa.c | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_pci.c | 37
-rw-r--r-- drivers/gpu/drm/xe/xe_pci_sriov.c | 51
-rw-r--r-- drivers/gpu/drm/xe/xe_pci_types.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_pt.c | 14
-rw-r--r-- drivers/gpu/drm/xe/xe_pt.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_reg_whitelist.c | 4
-rw-r--r-- drivers/gpu/drm/xe/xe_ring_ops.c | 20
-rw-r--r-- drivers/gpu/drm/xe/xe_rtp.c | 6
-rw-r--r-- drivers/gpu/drm/xe/xe_rtp.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_sa.c | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_tile.c | 12
-rw-r--r-- drivers/gpu/drm/xe/xe_tile.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 17
-rw-r--r-- drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h | 2
-rw-r--r-- drivers/gpu/drm/xe/xe_tuning.c | 71
-rw-r--r-- drivers/gpu/drm/xe/xe_tuning.h | 3
-rw-r--r-- drivers/gpu/drm/xe/xe_uc.c | 8
-rw-r--r-- drivers/gpu/drm/xe/xe_uc.h | 1
-rw-r--r-- drivers/gpu/drm/xe/xe_vm.c | 32
-rw-r--r-- drivers/gpu/drm/xe/xe_wa.c | 26
-rw-r--r-- drivers/gpu/drm/xe/xe_wa_oob.rules | 2
-rw-r--r-- drivers/hid/Kconfig | 1
-rw-r--r-- drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c | 12
-rw-r--r-- drivers/hid/bpf/hid_bpf_dispatch.c | 9
-rw-r--r-- drivers/hid/hid-ids.h | 4
-rw-r--r-- drivers/hid/hid-quirks.c | 2
-rw-r--r-- drivers/hid/hid-thrustmaster.c | 1
-rw-r--r-- drivers/hid/hid-uclogic-core.c | 7
-rw-r--r-- drivers/hid/usbhid/usbkbd.c | 2
-rw-r--r-- drivers/hv/channel.c | 65
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 6
-rw-r--r-- drivers/hv/vmbus_drv.c | 100
-rw-r--r-- drivers/hwmon/acpi_power_meter.c | 8
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 5
-rw-r--r-- drivers/hwmon/gpio-fan.c | 16
-rw-r--r-- drivers/hwmon/xgene-hwmon.c | 2
-rw-r--r-- drivers/hwtracing/coresight/coresight-core.c | 2
-rw-r--r-- drivers/hwtracing/coresight/coresight-etb10.c | 26
-rw-r--r-- drivers/hwtracing/coresight/coresight-trace-id.c | 22
-rw-r--r-- drivers/hwtracing/intel_th/Kconfig | 1
-rw-r--r-- drivers/hwtracing/intel_th/msu.c | 31
-rw-r--r-- drivers/i2c/busses/i2c-amd-asf-plat.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-designware-pcidrv.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-imx-lpi2c.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-pxa.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-qcom-geni.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-qup.c | 36
-rw-r--r-- drivers/i3c/master/svc-i3c-master.c | 21
-rw-r--r-- drivers/iio/accel/adis16201.c | 4
-rw-r--r-- drivers/iio/accel/adxl355_core.c | 2
-rw-r--r-- drivers/iio/accel/adxl367.c | 10
-rw-r--r-- drivers/iio/accel/fxls8962af-core.c | 7
-rw-r--r-- drivers/iio/adc/ad4695.c | 34
-rw-r--r-- drivers/iio/adc/ad7266.c | 2
-rw-r--r-- drivers/iio/adc/ad7606.c | 164
-rw-r--r-- drivers/iio/adc/ad7606.h | 37
-rw-r--r-- drivers/iio/adc/ad7606_spi.c | 139
-rw-r--r-- drivers/iio/adc/ad7768-1.c | 7
-rw-r--r-- drivers/iio/adc/ad7944.c | 16
-rw-r--r-- drivers/iio/adc/dln2-adc.c | 2
-rw-r--r-- drivers/iio/adc/qcom-spmi-iadc.c | 4
-rw-r--r-- drivers/iio/adc/rockchip_saradc.c | 17
-rw-r--r-- drivers/iio/chemical/pms7003.c | 5
-rw-r--r-- drivers/iio/chemical/sps30.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 4
-rw-r--r-- drivers/iio/dac/ad3552r-hs.c | 29
-rw-r--r-- drivers/iio/dac/ad3552r-hs.h | 8
-rw-r--r-- drivers/iio/dac/adi-axi-dac.c | 22
-rw-r--r-- drivers/iio/imu/bmi270/bmi270_core.c | 6
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 2
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 6
-rw-r--r-- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 7
-rw-r--r-- drivers/iio/light/hid-sensor-prox.c | 22
-rw-r--r-- drivers/iio/light/opt3001.c | 5
-rw-r--r-- drivers/iio/pressure/mprls0025pa.h | 17
-rw-r--r-- drivers/iio/temperature/maxim_thermocouple.c | 2
-rw-r--r-- drivers/infiniband/core/device.c | 6
-rw-r--r-- drivers/infiniband/core/umem.c | 36
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 144
-rw-r--r-- drivers/infiniband/core/verbs.c | 11
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_cq.c | 5
-rw-r--r-- drivers/input/joystick/xpad.c | 43
-rw-r--r-- drivers/input/keyboard/mtk-pmic-keys.c | 4
-rw-r--r-- drivers/input/mouse/synaptics.c | 5
-rw-r--r-- drivers/input/touchscreen/cyttsp5.c | 7
-rw-r--r-- drivers/iommu/amd/init.c | 8
-rw-r--r-- drivers/iommu/amd/io_pgtable_v2.c | 2
-rw-r--r-- drivers/iommu/amd/iommu.c | 2
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c | 2
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 6
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 25
-rw-r--r-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1
-rw-r--r-- drivers/iommu/dma-iommu.c | 28
-rw-r--r-- drivers/iommu/intel/iommu.c | 41
-rw-r--r-- drivers/iommu/intel/svm.c | 43
-rw-r--r-- drivers/iommu/iommu-priv.h | 2
-rw-r--r-- drivers/iommu/iommu.c | 31
-rw-r--r-- drivers/iommu/iommufd/device.c | 34
-rw-r--r-- drivers/iommu/iommufd/hw_pagetable.c | 3
-rw-r--r-- drivers/iommu/of_iommu.c | 6
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 2
-rw-r--r-- drivers/irqchip/irq-qcom-mpm.c | 3
-rw-r--r-- drivers/irqchip/irq-renesas-rzv2h.c | 91
-rw-r--r-- drivers/irqchip/irq-riscv-aplic-direct.c | 24
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-early.c | 8
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-platform.c | 16
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.c | 96
-rw-r--r-- drivers/irqchip/irq-riscv-imsic-state.h | 7
-rw-r--r-- drivers/leds/Kconfig | 1
-rw-r--r-- drivers/leds/leds-st1202.c | 4
-rw-r--r-- drivers/leds/rgb/leds-pwm-multicolor.c | 5
-rw-r--r-- drivers/leds/trigger/ledtrig-netdev.c | 16
-rw-r--r-- drivers/mailbox/mailbox.c | 7
-rw-r--r-- drivers/mailbox/pcc.c | 23
-rw-r--r-- drivers/mcb/mcb-parse.c | 2
-rw-r--r-- drivers/md/dm-bufio.c | 9
-rw-r--r-- drivers/md/dm-cache-target.c | 24
-rw-r--r-- drivers/md/dm-integrity.c | 2
-rw-r--r-- drivers/md/dm-table.c | 12
-rw-r--r-- drivers/md/dm-vdo/indexer/index-layout.c | 5
-rw-r--r-- drivers/md/dm-vdo/vdo.c | 11
-rw-r--r-- drivers/md/dm.c | 8
-rw-r--r-- drivers/md/raid1.c | 26
-rw-r--r-- drivers/media/cec/core/cec-pin.c | 11
-rw-r--r-- drivers/media/i2c/Kconfig | 1
-rw-r--r-- drivers/media/i2c/adv7180.c | 34
-rw-r--r-- drivers/media/i2c/imx214.c | 978
-rw-r--r-- drivers/media/i2c/imx219.c | 2
-rw-r--r-- drivers/media/i2c/imx335.c | 21
-rw-r--r-- drivers/media/i2c/ov08x40.c | 56
-rw-r--r-- drivers/media/i2c/ov2740.c | 4
-rw-r--r-- drivers/media/i2c/tc358746.c | 19
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csid.c | 60
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe.c | 4
-rw-r--r-- drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c | 3
-rw-r--r-- drivers/media/platform/st/stm32/stm32-csi.c | 36
-rw-r--r-- drivers/media/test-drivers/vivid/vivid-kthread-cap.c | 11
-rw-r--r-- drivers/media/test-drivers/vivid/vivid-kthread-out.c | 11
-rw-r--r-- drivers/media/test-drivers/vivid/vivid-kthread-touch.c | 11
-rw-r--r-- drivers/media/test-drivers/vivid/vivid-sdr-cap.c | 11
-rw-r--r-- drivers/media/usb/cx231xx/cx231xx-417.c | 2
-rw-r--r-- drivers/media/usb/uvc/uvc_ctrl.c | 77
-rw-r--r-- drivers/media/usb/uvc/uvc_v4l2.c | 6
-rw-r--r-- drivers/media/v4l2-core/v4l2-subdev.c | 2
-rw-r--r-- drivers/message/fusion/mptscsih.c | 4
-rw-r--r-- drivers/mfd/axp20x.c | 1
-rw-r--r-- drivers/mfd/syscon.c | 9
-rw-r--r-- drivers/mfd/tps65219.c | 7
-rw-r--r-- drivers/misc/eeprom/ee1004.c | 4
-rw-r--r-- drivers/misc/lkdtm/perms.c | 14
-rw-r--r-- drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c | 8
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r-- drivers/misc/mei/pci-me.c | 1
-rw-r--r-- drivers/misc/mei/vsc-tp.c | 40
-rw-r--r-- drivers/misc/pci_endpoint_test.c | 6
-rw-r--r-- drivers/mmc/host/dw_mmc-exynos.c | 41
-rw-r--r-- drivers/mmc/host/renesas_sdhi_core.c | 10
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 6
-rw-r--r-- drivers/mmc/host/sdhci.c | 9
-rw-r--r-- drivers/mmc/host/sdhci_am654.c | 35
-rw-r--r-- drivers/net/bonding/bond_main.c | 2
-rw-r--r-- drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 182
-rw-r--r-- drivers/net/can/m_can/m_can.c | 3
-rw-r--r-- drivers/net/can/rockchip/rockchip_canfd-core.c | 2
-rw-r--r-- drivers/net/can/slcan/slcan-core.c | 26
-rw-r--r-- drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 42
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 240
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 3
-rw-r--r-- drivers/net/dsa/b53/b53_regs.h | 14
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 1
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 135
-rw-r--r-- drivers/net/dsa/mt7530.c | 6
-rw-r--r-- drivers/net/dsa/ocelot/felix_vsc9959.c | 5
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 6
-rw-r--r-- drivers/net/ethernet/amd/pds_core/adminq.c | 36
-rw-r--r-- drivers/net/ethernet/amd/pds_core/auxbus.c | 42
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.c | 9
-rw-r--r-- drivers/net/ethernet/amd/pds_core/core.h | 11
-rw-r--r-- drivers/net/ethernet/amd/pds_core/devlink.c | 11
-rw-r--r-- drivers/net/ethernet/amd/pds_core/main.c | 11
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 9
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 24
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 11
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 20
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 19
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 2
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 30
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 59
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 82
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 13
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 25
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.c | 47
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adapter.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 208
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_irq.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 133
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 235
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 11
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 5
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf.h | 20
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_lib.c | 86
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_main.c | 1
-rw-r--r-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ptp.c | 6
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_xdp.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 114
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 11
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 34
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 124
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 21
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c | 182
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h | 21
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 63
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 10
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 22
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_star_emac.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/events.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rdma.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 2
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 213
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 8
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 6
-rw-r--r-- drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 2
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 27
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.h | 1
-rw-r--r-- drivers/net/ethernet/microsoft/mana/gdma_main.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 7
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 32
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase_main.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 62
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tn40.c | 9
-rw-r--r-- drivers/net/ethernet/tehuti/tn40.h | 33
-rw-r--r-- drivers/net/ethernet/tehuti/tn40_mdio.c | 82
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 6
-rw-r--r-- drivers/net/ethernet/ti/cpsw_new.c | 1
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 2
-rw-r--r-- drivers/net/ethernet/vertexcom/mse102x.c | 36
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 13
-rw-r--r-- drivers/net/hyperv/netvsc.c | 57
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 62
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 24
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 9
-rw-r--r-- drivers/net/mctp/mctp-i2c.c | 2
-rw-r--r-- drivers/net/mdio/mdio-mux-meson-gxl.c | 3
-rw-r--r-- drivers/net/netdevsim/netdev.c | 31
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 1
-rw-r--r-- drivers/net/phy/dp83822.c | 40
-rw-r--r-- drivers/net/phy/micrel.c | 7
-rw-r--r-- drivers/net/phy/microchip.c | 46
-rw-r--r-- drivers/net/phy/nxp-c45-tja11xx.c | 54
-rw-r--r-- drivers/net/phy/phy_device.c | 53
-rw-r--r-- drivers/net/phy/phy_led_triggers.c | 23
-rw-r--r-- drivers/net/phy/phylink.c | 166
-rw-r--r-- drivers/net/usb/r8152.c | 1
-rw-r--r-- drivers/net/usb/rndis_host.c | 16
-rw-r--r-- drivers/net/virtio_net.c | 146
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 5
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_xdp.c | 2
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 36
-rw-r--r-- drivers/net/vxlan/vxlan_vnifilter.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_rx.c | 117
-rw-r--r-- drivers/net/wireless/ath/ath12k/core.c | 45
-rw-r--r-- drivers/net/wireless/ath/ath12k/core.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_mon.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_rx.c | 22
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_rx.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_tx.c | 145
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_desc.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_rx.h | 8
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_tx.h | 10
-rw-r--r-- drivers/net/wireless/ath/ath12k/mac.c | 102
-rw-r--r-- drivers/net/wireless/ath/ath12k/mac.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath12k/pci.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath12k/rx_desc.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath12k/wmi.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/dr.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/uefi.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 36
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/channel.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76.h | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 80
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 44
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/main.c | 30
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/scan.c | 21
-rw-r--r-- drivers/net/wireless/mediatek/mt76/tx.c | 3
-rw-r--r-- drivers/net/wireless/purelifi/plfxlc/mac.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/core.c | 17
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 54
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/reg.h | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 14
-rw-r--r-- drivers/net/wireless/realtek/rtw88/util.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.c | 107
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.c | 67
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.h | 6
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.c | 101
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.h | 12
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.c | 55
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.h | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac80211.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/reg.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/regd.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ser.c | 4
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 14
-rw-r--r-- drivers/net/xen-netfront.c | 17
-rw-r--r-- drivers/ntb/hw/amd/ntb_hw_amd.c | 1
-rw-r--r-- drivers/ntb/hw/idt/ntb_hw_idt.c | 18
-rw-r--r-- drivers/nvdimm/label.c | 3
-rw-r--r-- drivers/nvme/host/Kconfig | 1
-rw-r--r-- drivers/nvme/host/core.c | 42
-rw-r--r-- drivers/nvme/host/multipath.c | 5
-rw-r--r-- drivers/nvme/host/nvme.h | 3
-rw-r--r-- drivers/nvme/host/pci.c | 14
-rw-r--r-- drivers/nvme/host/tcp.c | 31
-rw-r--r-- drivers/nvme/target/Kconfig | 1
-rw-r--r-- drivers/nvme/target/core.c | 3
-rw-r--r-- drivers/nvme/target/fc.c | 25
-rw-r--r-- drivers/nvme/target/pci-epf.c | 90
-rw-r--r-- drivers/nvme/target/tcp.c | 3
-rw-r--r-- drivers/nvmem/Kconfig | 1
-rw-r--r-- drivers/nvmem/core.c | 40
-rw-r--r-- drivers/nvmem/qfprom.c | 26
-rw-r--r-- drivers/nvmem/rockchip-otp.c | 17
-rw-r--r-- drivers/of/resolver.c | 37
-rw-r--r-- drivers/pci/Kconfig | 6
-rw-r--r-- drivers/pci/ats.c | 33
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 2
-rw-r--r-- drivers/pci/controller/pcie-brcmstb.c | 5
-rw-r--r-- drivers/pci/controller/pcie-xilinx-cpm.c | 3
-rw-r--r-- drivers/pci/controller/vmd.c | 20
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-mhi.c | 2
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 2
-rw-r--r-- drivers/pci/hotplug/s390_pci_hpc.c | 1
-rw-r--r-- drivers/pci/msi/msi.c | 38
-rw-r--r-- drivers/pci/setup-bus.c | 6
-rw-r--r-- drivers/perf/arm-cmn.c | 11
-rw-r--r-- drivers/perf/arm_pmuv3.c | 4
-rw-r--r-- drivers/phy/phy-core.c | 7
-rw-r--r-- drivers/phy/renesas/phy-rcar-gen3-usb2.c | 135
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 6
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-usbdp.c | 88
-rw-r--r-- drivers/phy/samsung/phy-exynos5-usbdrd.c | 7
-rw-r--r-- drivers/phy/starfive/phy-jh7110-usb.c | 7
-rw-r--r-- drivers/phy/tegra/xusb-tegra186.c | 46
-rw-r--r-- drivers/phy/tegra/xusb.c | 8
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm281xx.c | 44
-rw-r--r-- drivers/pinctrl/devicetree.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx.c | 6
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-airoha.c | 159
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 14
-rw-r--r-- drivers/pinctrl/pinctrl-mcp23s08.c | 23
-rw-r--r-- drivers/pinctrl/qcom/Kconfig.msm | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 23
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8917.c | 8
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8750.c | 4
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rza2.c | 3
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 19
-rw-r--r-- drivers/pinctrl/sophgo/pinctrl-cv18xx.c | 33
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra.c | 59
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra.h | 6
-rw-r--r-- drivers/platform/x86/amd/hsmp/Kconfig | 2
-rw-r--r-- drivers/platform/x86/amd/hsmp/acpi.c | 10
-rw-r--r-- drivers/platform/x86/amd/hsmp/hsmp.c | 1
-rw-r--r-- drivers/platform/x86/amd/hsmp/hsmp.h | 4
-rw-r--r-- drivers/platform/x86/amd/hsmp/plat.c | 42
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc-quirks.c | 7
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc.c | 7
-rw-r--r-- drivers/platform/x86/amd/pmf/tee-if.c | 23
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 14
-rw-r--r-- drivers/platform/x86/dell/alienware-wmi.c | 9
-rw-r--r-- drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c | 2
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 33
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 16
-rw-r--r-- drivers/platform/x86/intel/hid.c | 21
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c | 13
-rw-r--r-- drivers/platform/x86/think-lmi.c | 26
-rw-r--r-- drivers/platform/x86/think-lmi.h | 1
-rw-r--r-- drivers/platform/x86/thinkpad_acpi.c | 7
-rw-r--r-- drivers/platform/x86/x86-android-tablets/dmi.c | 14
-rw-r--r-- drivers/platform/x86/x86-android-tablets/other.c | 124
-rw-r--r-- drivers/platform/x86/x86-android-tablets/x86-android-tablets.h | 3
-rw-r--r-- drivers/pmdomain/core.c | 2
-rw-r--r-- drivers/pmdomain/imx/gpcv2.c | 2
-rw-r--r-- drivers/pmdomain/renesas/rcar-gen4-sysc.c | 5
-rw-r--r-- drivers/pmdomain/renesas/rcar-sysc.c | 5
-rw-r--r-- drivers/power/supply/axp20x_battery.c | 21
-rw-r--r-- drivers/pps/generators/pps_gen-dummy.c | 2
-rw-r--r-- drivers/pps/generators/pps_gen.c | 14
-rw-r--r-- drivers/pps/generators/sysfs.c | 6
-rw-r--r-- drivers/ptp/ptp_ocp.c | 76
-rw-r--r-- drivers/pwm/core.c | 13
-rw-r--r-- drivers/pwm/pwm-axi-pwmgen.c | 10
-rw-r--r-- drivers/regulator/ad5398.c | 12
-rw-r--r-- drivers/regulator/max20086-regulator.c | 7
-rw-r--r-- drivers/regulator/rk808-regulator.c | 4
-rw-r--r-- drivers/remoteproc/qcom_wcnss.c | 34
-rw-r--r-- drivers/rtc/class.c | 2
-rw-r--r-- drivers/rtc/lib.c | 24
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 4
-rw-r--r-- drivers/rtc/rtc-pcf85063.c | 19
-rw-r--r-- drivers/rtc/rtc-rv3032.c | 2
-rw-r--r-- drivers/s390/char/sclp_con.c | 17
-rw-r--r-- drivers/s390/char/sclp_tty.c | 12
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 72
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 4
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 20
-rw-r--r-- drivers/scsi/ips.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 29
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r-- drivers/scsi/megaraid.c | 10
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 10
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 10
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 12
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 1
-rw-r--r-- drivers/scsi/scsi.c | 36
-rw-r--r-- drivers/scsi/scsi_debug.c | 55
-rw-r--r-- drivers/scsi/scsi_lib.c | 6
-rw-r--r-- drivers/scsi/scsi_sysctl.c | 4
-rw-r--r-- drivers/scsi/sd_zbc.c | 6
-rw-r--r-- drivers/scsi/st.c | 29
-rw-r--r-- drivers/scsi/st.h | 2
-rw-r--r-- drivers/scsi/storvsc_drv.c | 1
-rw-r--r-- drivers/soc/apple/rtkit-internal.h | 1
-rw-r--r-- drivers/soc/apple/rtkit.c | 58
-rw-r--r-- drivers/soc/mediatek/mtk-mutex.c | 6
-rw-r--r-- drivers/soc/qcom/ice.c | 48
-rw-r--r-- drivers/soc/samsung/exynos-asv.c | 1
-rw-r--r-- drivers/soc/samsung/exynos-chipid.c | 1
-rw-r--r-- drivers/soc/samsung/exynos-pmu.c | 1
-rw-r--r-- drivers/soc/samsung/exynos-usi.c | 1
-rw-r--r-- drivers/soc/samsung/exynos3250-pmu.c | 1
-rw-r--r-- drivers/soc/samsung/exynos5250-pmu.c | 1
-rw-r--r-- drivers/soc/samsung/exynos5420-pmu.c | 1
-rw-r--r-- drivers/soc/ti/k3-socinfo.c | 13
-rw-r--r-- drivers/soundwire/amd_manager.c | 2
-rw-r--r-- drivers/soundwire/bus.c | 9
-rw-r--r-- drivers/soundwire/cadence_master.c | 22
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 46
-rw-r--r-- drivers/spi/spi-imx.c | 5
-rw-r--r-- drivers/spi/spi-loopback-test.c | 2
-rw-r--r-- drivers/spi/spi-mem.c | 6
-rw-r--r-- drivers/spi/spi-mux.c | 4
-rw-r--r-- drivers/spi/spi-rockchip.c | 2
-rw-r--r-- drivers/spi/spi-sun4i.c | 5
-rw-r--r-- drivers/spi/spi-tegra114.c | 6
-rw-r--r-- drivers/spi/spi-tegra210-quad.c | 6
-rw-r--r-- drivers/spi/spi-zynqmp-gqspi.c | 20
-rw-r--r-- drivers/staging/axis-fifo/axis-fifo.c | 14
-rw-r--r-- drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 10
-rw-r--r-- drivers/staging/iio/adc/ad7816.c | 2
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c | 1
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 69
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r-- drivers/target/target_core_device.c | 8
-rw-r--r-- drivers/target/target_core_pr.c | 6
-rw-r--r-- drivers/target/target_core_spc.c | 34
-rw-r--r-- drivers/thermal/intel/x86_pkg_temp_thermal.c | 1
-rw-r--r-- drivers/thermal/mediatek/lvts_thermal.c | 3
-rw-r--r-- drivers/thermal/qoriq_thermal.c | 13
-rw-r--r-- drivers/thunderbolt/ctl.c | 5
-rw-r--r-- drivers/thunderbolt/retimer.c | 8
-rw-r--r-- drivers/thunderbolt/tb.c | 16
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 2
-rw-r--r-- drivers/tty/serial/atmel_serial.c | 2
-rw-r--r-- drivers/tty/serial/imx.c | 2
-rw-r--r-- drivers/tty/serial/jsm/jsm_tty.c | 1
-rw-r--r-- drivers/tty/serial/msm_serial.c | 6
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.c | 34
-rw-r--r-- drivers/tty/serial/serial_mctrl_gpio.h | 17
-rw-r--r-- drivers/tty/serial/sh-sci.c | 98
-rw-r--r-- drivers/tty/serial/sifive.c | 6
-rw-r--r-- drivers/tty/serial/stm32-usart.c | 2
-rw-r--r-- drivers/tty/vt/selection.c | 5
-rw-r--r-- drivers/ufs/core/ufs-mcq.c | 12
-rw-r--r-- drivers/ufs/core/ufshcd.c | 33
-rw-r--r-- drivers/ufs/host/ufs-exynos.c | 44
-rw-r--r-- drivers/ufs/host/ufs-exynos.h | 1
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 2
-rw-r--r-- drivers/uio/uio_hv_generic.c | 39
-rw-r--r-- drivers/usb/cdns3/cdns3-gadget.c | 2
-rw-r--r-- drivers/usb/cdns3/cdnsp-gadget.c | 31
-rw-r--r-- drivers/usb/cdns3/cdnsp-gadget.h | 6
-rw-r--r-- drivers/usb/cdns3/cdnsp-pci.c | 12
-rw-r--r-- drivers/usb/cdns3/cdnsp-ring.c | 3
-rw-r--r-- drivers/usb/cdns3/core.h | 3
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 44
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 21
-rw-r--r-- drivers/usb/class/usbtmc.c | 63
-rw-r--r-- drivers/usb/core/quirks.c | 12
-rw-r--r-- drivers/usb/dwc3/core.h | 4
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 10
-rw-r--r-- drivers/usb/dwc3/dwc3-xilinx.c | 4
-rw-r--r-- drivers/usb/dwc3/gadget.c | 88
-rw-r--r-- drivers/usb/gadget/composite.c | 12
-rw-r--r-- drivers/usb/gadget/function/f_ecm.c | 7
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 4
-rw-r--r-- drivers/usb/gadget/function/f_midi2.c | 2
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/dev.c | 3
-rw-r--r-- drivers/usb/gadget/udc/tegra-xudc.c | 4
-rw-r--r-- drivers/usb/host/max3421-hcd.c | 7
-rw-r--r-- drivers/usb/host/ohci-pci.c | 23
-rw-r--r-- drivers/usb/host/uhci-platform.c | 2
-rw-r--r-- drivers/usb/host/xhci-dbgcap.c | 19
-rw-r--r-- drivers/usb/host/xhci-dbgcap.h | 3
-rw-r--r-- drivers/usb/host/xhci-hub.c | 30
-rw-r--r-- drivers/usb/host/xhci-mem.c | 34
-rw-r--r-- drivers/usb/host/xhci-mvebu.c | 10
-rw-r--r-- drivers/usb/host/xhci-mvebu.h | 6
-rw-r--r-- drivers/usb/host/xhci-plat.c | 2
-rw-r--r-- drivers/usb/host/xhci-ring.c | 87
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 3
-rw-r--r-- drivers/usb/host/xhci.c | 4
-rw-r--r-- drivers/usb/host/xhci.h | 12
-rw-r--r-- drivers/usb/misc/onboard_usb_dev.c | 10
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r-- drivers/usb/serial/option.c | 3
-rw-r--r-- drivers/usb/serial/pl2303.c | 2
-rw-r--r-- drivers/usb/serial/usb-serial-simple.c | 7
-rw-r--r-- drivers/usb/storage/debug.c | 4
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 14
-rw-r--r-- drivers/usb/typec/class.c | 24
-rw-r--r-- drivers/usb/typec/class.h | 1
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 2
-rw-r--r-- drivers/usb/typec/ucsi/cros_ec_ucsi.c | 5
-rw-r--r-- drivers/usb/typec/ucsi/displayport.c | 21
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 53
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.h | 10
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_acpi.c | 5
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_ccg.c | 67
-rw-r--r-- drivers/vdpa/mlx5/net/mlx5_vnet.c | 3
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 3
-rw-r--r-- drivers/vfio/pci/vfio_pci_core.c | 22
-rw-r--r-- drivers/vfio/pci/vfio_pci_intrs.c | 2
-rw-r--r-- drivers/vhost/scsi.c | 103
-rw-r--r-- drivers/video/fbdev/core/bitblit.c | 5
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 10
-rw-r--r-- drivers/video/fbdev/core/fbcon.h | 38
-rw-r--r-- drivers/video/fbdev/core/fbcon_ccw.c | 5
-rw-r--r-- drivers/video/fbdev/core/fbcon_cw.c | 5
-rw-r--r-- drivers/video/fbdev/core/fbcon_ud.c | 5
-rw-r--r-- drivers/video/fbdev/core/tileblit.c | 45
-rw-r--r-- drivers/video/fbdev/fsl-diu-fb.c | 1
-rw-r--r-- drivers/virtio/virtio.c | 35
-rw-r--r-- drivers/virtio/virtio_pci_modern.c | 4
-rw-r--r-- drivers/virtio/virtio_ring.c | 2
-rw-r--r-- drivers/watchdog/aspeed_wdt.c | 81
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 10
-rw-r--r-- drivers/xen/Kconfig | 2
-rw-r--r-- drivers/xen/pci.c | 32
-rw-r--r-- drivers/xen/platform-pci.c | 4
-rw-r--r-- drivers/xen/swiotlb-xen.c | 1
-rw-r--r-- drivers/xen/xenbus/xenbus.h | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_comms.c | 9
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c | 14
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 18
1026 files changed, 14390 insertions, 7409 deletions
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 5f43db02b240..92c768b0c9a0 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -34,6 +34,8 @@ static void aie2_job_release(struct kref *ref)
job = container_of(ref, struct amdxdna_sched_job, refcnt);
amdxdna_sched_job_cleanup(job);
+ atomic64_inc(&job->hwctx->job_free_cnt);
+ wake_up(&job->hwctx->priv->job_free_wq);
if (job->out_fence)
dma_fence_put(job->out_fence);
kfree(job);
@@ -134,7 +136,8 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
if (!fence)
return;
- dma_fence_wait(fence, false);
+ /* Wait up to 2 seconds for fw to finish all pending requests */
+ dma_fence_wait_timeout(fence, false, msecs_to_jiffies(2000));
dma_fence_put(fence);
}
@@ -616,6 +619,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
hwctx->status = HWCTX_STAT_INIT;
ndev = xdna->dev_handle;
ndev->hwctx_num++;
+ init_waitqueue_head(&priv->job_free_wq);
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@@ -652,25 +656,23 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
xdna = hwctx->client->xdna;
ndev = xdna->dev_handle;
ndev->hwctx_num--;
- drm_sched_wqueue_stop(&hwctx->priv->sched);
- /* Now, scheduler will not send command to device. */
+ XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
+ aie2_hwctx_wait_for_idle(hwctx);
+
+ /* Request fw to destroy hwctx and cancel the remaining pending requests */
aie2_release_resource(hwctx);
- /*
- * All submitted commands are aborted.
- * Restart scheduler queues to cleanup jobs. The amdxdna_sched_job_run()
- * will return NODEV if it is called.
- */
- drm_sched_wqueue_start(&hwctx->priv->sched);
+ /* Wait for all submitted jobs to be completed or canceled */
+ wait_event(hwctx->priv->job_free_wq,
+ atomic64_read(&hwctx->job_submit_cnt) ==
+ atomic64_read(&hwctx->job_free_cnt));
- aie2_hwctx_wait_for_idle(hwctx);
- drm_sched_entity_destroy(&hwctx->priv->entity);
drm_sched_fini(&hwctx->priv->sched);
aie2_ctx_syncobj_destroy(hwctx);
- XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
-
for (idx = 0; idx < ARRAY_SIZE(hwctx->priv->cmd_buf); idx++)
drm_gem_object_put(to_gobj(hwctx->priv->cmd_buf[idx]));
amdxdna_gem_unpin(hwctx->priv->heap);
@@ -879,6 +881,7 @@ retry:
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
aie2_job_put(job);
+ atomic64_inc(&hwctx->job_submit_cnt);
return 0;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index d11b1c83d9c3..43442b9e273b 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -220,6 +220,8 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
args->syncobj_handle = hwctx->syncobj_hdl;
mutex_unlock(&xdna->dev_lock);
+ atomic64_set(&hwctx->job_submit_cnt, 0);
+ atomic64_set(&hwctx->job_free_cnt, 0);
XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
drm_dev_exit(idx);
return 0;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index 80b0304193ec..f0a4a8586d85 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -87,6 +87,9 @@ struct amdxdna_hwctx {
struct amdxdna_qos_info qos;
struct amdxdna_hwctx_param_config_cu *cus;
u32 syncobj_hdl;
+
+ atomic64_t job_submit_cnt;
+ atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
};
#define drm_job_to_xdna_job(j) \
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index e5301fac1397..2879e4149c93 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -349,8 +349,6 @@ static irqreturn_t mailbox_irq_handler(int irq, void *p)
trace_mbox_irq_handle(MAILBOX_NAME, irq);
/* Schedule a rx_work to call the callback functions */
queue_work(mb_chann->work_q, &mb_chann->rx_work);
- /* Clear IOHUB register */
- mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
return IRQ_HANDLED;
}
@@ -367,6 +365,9 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
return;
}
+again:
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
+
while (1) {
/*
* If return is 0, keep consuming next message, until there is
@@ -380,10 +381,18 @@ static void mailbox_rx_worker(struct work_struct *rx_work)
if (unlikely(ret)) {
MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
WRITE_ONCE(mb_chann->bad_state, true);
- disable_irq(mb_chann->msix_irq);
- break;
+ return;
}
}
+
+ /*
+ * The hardware will not generate an interrupt if the firmware creates a
+ * new response right after the driver clears the interrupt register.
+ * Check the interrupt register to make sure there is no new response
+ * before exiting.
+ */
+ if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
+ goto again;
}
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3e56ce8bc2c1..c74fc4958bb9 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
#include <generated/utsrelease.h>
#include <drm/drm_accel.h>
@@ -36,8 +37,6 @@
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
-static struct lock_class_key submitted_jobs_xa_lock_class_key;
-
int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
@@ -421,6 +420,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
+ flush_work(&vdev->irq_ipc_work);
+ flush_work(&vdev->irq_dct_work);
+ flush_work(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
@@ -465,54 +467,6 @@ static const struct drm_driver driver = {
.major = 1,
};
-static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
-{
- struct ivpu_file_priv *file_priv;
- unsigned long ctx_id;
-
- mutex_lock(&vdev->context_list_lock);
-
- xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
- if (!file_priv->has_mmu_faults || file_priv->aborted)
- continue;
-
- mutex_lock(&file_priv->lock);
- ivpu_context_abort_locked(file_priv);
- file_priv->aborted = true;
- mutex_unlock(&file_priv->lock);
- }
-
- mutex_unlock(&vdev->context_list_lock);
-}
-
-static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
-{
- struct ivpu_device *vdev = arg;
- u8 irq_src;
-
- if (kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_NONE;
-
- while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
- switch (irq_src) {
- case IVPU_HW_IRQ_SRC_IPC:
- ivpu_ipc_irq_thread_handler(vdev);
- break;
- case IVPU_HW_IRQ_SRC_MMU_EVTQ:
- ivpu_context_abort_invalid(vdev);
- break;
- case IVPU_HW_IRQ_SRC_DCT:
- ivpu_pm_dct_irq_thread_handler(vdev);
- break;
- default:
- ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
- break;
- }
- }
-
- return IRQ_HANDLED;
-}
-
static int ivpu_irq_init(struct ivpu_device *vdev)
{
struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -524,12 +478,16 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
return ret;
}
+ INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
+ INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
+ INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);
+
ivpu_irq_handlers_init(vdev);
vdev->irq = pci_irq_vector(pdev, 0);
- ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
- ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
if (ret)
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
@@ -617,7 +575,6 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
- lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
vdev->db_limit.min = IVPU_MIN_DB;
@@ -627,6 +584,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_xa_destroy;
+ ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
+ if (ret)
+ goto err_xa_destroy;
+
ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
if (ret)
goto err_xa_destroy;
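The conversion from a threaded IRQ to plain work items follows a standard shape: the hard handler stays minimal and queues per-source work, and each work function recovers its device with container_of(). A condensed sketch, assuming a hypothetical struct my_dev:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct ipc_work;	/* one work item per IRQ source */
};

static void ipc_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, ipc_work);

	/* bottom-half processing runs here, in process context */
	(void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;

	queue_work(system_wq, &dev->ipc_work);	/* defer instead of threading */
	return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->ipc_work, ipc_work_fn);
}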
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 3fdff3f6cffd..b57d878f2fcd 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -142,9 +142,14 @@ struct ivpu_device {
struct xa_limit db_limit;
u32 db_next;
+ struct work_struct irq_ipc_work;
+ struct work_struct irq_dct_work;
+ struct work_struct context_abort_work;
+
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
+ struct mutex submitted_jobs_lock; /* Protects submitted_jobs */
struct xarray submitted_jobs_xa;
struct ivpu_ipc_consumer job_done_consumer;
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 4e1054f3466e..d19e976893f8 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -106,7 +106,7 @@ static void timeouts_init(struct ivpu_device *vdev)
else
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 5;
- vdev->timeout.state_dump_msg = 10;
+ vdev->timeout.state_dump_msg = 100;
}
}
@@ -285,8 +285,6 @@ void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
- INIT_KFIFO(vdev->hw->irq.fifo);
-
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
else
@@ -300,7 +298,6 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev)
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
- kfifo_reset(&vdev->hw->irq.fifo);
ivpu_hw_ip_irq_enable(vdev);
ivpu_hw_btrs_irq_enable(vdev);
}
@@ -327,8 +324,6 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
ivpu_hw_btrs_global_int_enable(vdev);
- if (!kfifo_is_empty(&vdev->hw->irq.fifo))
- return IRQ_WAKE_THREAD;
if (ip_handled || btrs_handled)
return IRQ_HANDLED;
return IRQ_NONE;
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 1e85306bcd06..aa8fdbf2fc22 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -6,18 +6,10 @@
#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__
-#include <linux/kfifo.h>
-
#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
-#define IVPU_HW_IRQ_FIFO_LENGTH 1024
-
-#define IVPU_HW_IRQ_SRC_IPC 1
-#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
-#define IVPU_HW_IRQ_SRC_DCT 3
-
struct ivpu_addr_range {
resource_size_t start;
resource_size_t end;
@@ -27,7 +19,6 @@ struct ivpu_hw_info {
struct {
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
- DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
} irq;
struct {
struct ivpu_addr_range global;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 51b9581bb60a..e17c69fe5a03 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -666,8 +666,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_dct_work);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 71792dab3c21..3855e2df1e0c 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -14,7 +14,7 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
-#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_DEFAULT_ACTIVE_PERCENT 30u
#define DCT_PERIOD_US 35300u
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 5daaf07fc1a7..39f83225c181 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -460,13 +460,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- if (!list_empty(&ipc->cb_msg_list))
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->irq_ipc_work);
}
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_ipc_work);
struct ivpu_ipc_info *ipc = vdev->ipc;
struct ivpu_ipc_rx_msg *rx_msg, *r;
struct list_head cb_msg_list;
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
index b4dfb504679b..b524a1985b9d 100644
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -90,7 +90,7 @@ void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
-void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_work_fn(struct work_struct *work);
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
u32 channel, ivpu_ipc_rx_callback_t callback);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 7149312f16e1..4a8013f669f9 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -17,6 +17,7 @@
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
#include "ivpu_pm.h"
#include "ivpu_trace.h"
#include "vpu_boot_api.h"
@@ -83,23 +84,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
- GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
- goto err_free_cmdq;
- }
-
- ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
- &file_priv->cmdq_id_next, GFP_KERNEL);
- if (ret < 0) {
- ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
- goto err_erase_db_xa;
- }
-
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
- goto err_erase_cmdq_xa;
+ goto err_free_cmdq;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
@@ -107,10 +94,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
return cmdq;
-err_erase_cmdq_xa:
- xa_erase(&file_priv->cmdq_xa, cmdq->id);
-err_erase_db_xa:
- xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
return NULL;
@@ -223,7 +206,8 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
- ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
+ ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
+ cmdq->id, file_priv->ctx.id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -233,30 +217,88 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
return 0;
}
+static int ivpu_db_id_alloc(struct ivpu_device *vdev, u32 *db_id)
+{
+ int ret;
+ u32 id;
+
+ ret = xa_alloc_cyclic(&vdev->db_xa, &id, NULL, vdev->db_limit, &vdev->db_next, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ *db_id = id;
+ return 0;
+}
+
+static int ivpu_cmdq_id_alloc(struct ivpu_file_priv *file_priv, u32 *cmdq_id)
+{
+ int ret;
+ u32 id;
+
+ ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &id, NULL, file_priv->cmdq_limit,
+ &file_priv->cmdq_id_next, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ *cmdq_id = id;
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
{
+ struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- unsigned long cmdq_id;
+ unsigned long id;
int ret;
lockdep_assert_held(&file_priv->lock);
- xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
+ xa_for_each(&file_priv->cmdq_xa, id, cmdq)
if (cmdq->priority == priority)
break;
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv);
- if (!cmdq)
+ if (!cmdq) {
+ ivpu_err(vdev, "Failed to allocate command queue\n");
return NULL;
+ }
+
+ ret = ivpu_db_id_alloc(vdev, &cmdq->db_id);
+ if (ret) {
+ ivpu_err(file_priv->vdev, "Failed to allocate doorbell ID: %d\n", ret);
+ goto err_free_cmdq;
+ }
+
+ ret = ivpu_cmdq_id_alloc(file_priv, &cmdq->id);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+ goto err_erase_db_id;
+ }
+
cmdq->priority = priority;
+ ret = xa_err(xa_store(&file_priv->cmdq_xa, cmdq->id, cmdq, GFP_KERNEL));
+ if (ret) {
+ ivpu_err(vdev, "Failed to store command queue in cmdq_xa: %d\n", ret);
+ goto err_erase_cmdq_id;
+ }
}
ret = ivpu_cmdq_init(file_priv, cmdq, priority);
- if (ret)
- return NULL;
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize command queue: %d\n", ret);
+ goto err_free_cmdq;
+ }
return cmdq;
+
+err_erase_cmdq_id:
+ xa_erase(&file_priv->cmdq_xa, cmdq->id);
+err_erase_db_id:
+ xa_erase(&vdev->db_xa, cmdq->db_id);
+err_free_cmdq:
+ ivpu_cmdq_free(file_priv, cmdq);
+ return NULL;
}
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
@@ -319,11 +361,16 @@ void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
struct ivpu_device *vdev = file_priv->vdev;
lockdep_assert_held(&file_priv->lock);
+ ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
ivpu_cmdq_fini_all(file_priv);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS)
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+
+ ivpu_mmu_disable_ssid_events(vdev, file_priv->ctx.id);
+
+ file_priv->aborted = true;
}
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -462,16 +509,14 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
{
struct ivpu_job *job;
- xa_lock(&vdev->submitted_jobs_xa);
- job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
vdev->busy_time);
}
- xa_unlock(&vdev->submitted_jobs_xa);
-
return job;
}
@@ -479,6 +524,28 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
{
struct ivpu_job *job;
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ guard(mutex)(&job->file_priv->lock);
+
+ if (job->file_priv->has_mmu_faults)
+ return 0;
+
+ /*
+ * Mark the context as faulty and defer destruction of the job to the
+ * context abort work handler, so that MMU faults and jobs returning a
+ * context violation status are synchronized and handled the same way
+ */
+ job->file_priv->has_mmu_faults = true;
+ queue_work(system_wq, &vdev->context_abort_work);
+ return 0;
+ }
+
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
if (!job)
return -ENOENT;
@@ -497,6 +564,10 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
ivpu_stop_job_timeout_detection(vdev);
ivpu_rpm_put(vdev);
+
+ if (!xa_empty(&vdev->submitted_jobs_xa))
+ ivpu_start_job_timeout_detection(vdev);
+
return 0;
}
@@ -505,8 +576,12 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
struct ivpu_job *job;
unsigned long id;
+ mutex_lock(&vdev->submitted_jobs_lock);
+
xa_for_each(&vdev->submitted_jobs_xa, id, job)
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
@@ -521,6 +596,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
if (ret < 0)
return ret;
+ mutex_lock(&vdev->submitted_jobs_lock);
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(file_priv, priority);
@@ -528,18 +604,17 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
file_priv->ctx.id, job->engine_idx, priority);
ret = -EINVAL;
- goto err_unlock_file_priv;
+ goto err_unlock;
}
- xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
- &file_priv->job_id_next, GFP_KERNEL);
+ ret = xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
- goto err_unlock_submitted_jobs_xa;
+ goto err_unlock;
}
ret = ivpu_cmdq_push_job(cmdq, job);
@@ -562,21 +637,21 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
job->job_id, file_priv->ctx.id, job->engine_idx, priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
- xa_unlock(&vdev->submitted_jobs_xa);
-
mutex_unlock(&file_priv->lock);
- if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+ if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ }
+
+ mutex_unlock(&vdev->submitted_jobs_lock);
return 0;
err_erase_xa:
- __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_unlock_submitted_jobs_xa:
- xa_unlock(&vdev->submitted_jobs_xa);
-err_unlock_file_priv:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock:
mutex_unlock(&file_priv->lock);
+ mutex_unlock(&vdev->submitted_jobs_lock);
ivpu_rpm_put(vdev);
return ret;
}
@@ -745,7 +820,6 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
struct vpu_jsm_msg *jsm_msg)
{
struct vpu_ipc_msg_payload_job_done *payload;
- int ret;
if (!jsm_msg) {
ivpu_err(vdev, "IPC message has no JSM payload\n");
@@ -758,9 +832,10 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
}
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
- ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
- if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
- ivpu_start_job_timeout_detection(vdev);
+
+ mutex_lock(&vdev->submitted_jobs_lock);
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ mutex_unlock(&vdev->submitted_jobs_lock);
}
void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
@@ -773,3 +848,48 @@ void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}
+
+void ivpu_context_abort_work_fn(struct work_struct *work)
+{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, context_abort_work);
+ struct ivpu_file_priv *file_priv;
+ struct ivpu_job *job;
+ unsigned long ctx_id;
+ unsigned long id;
+
+ if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
+ ivpu_jsm_reset_engine(vdev, 0);
+
+ mutex_lock(&vdev->context_list_lock);
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ if (!file_priv->has_mmu_faults || file_priv->aborted)
+ continue;
+
+ mutex_lock(&file_priv->lock);
+ ivpu_context_abort_locked(file_priv);
+ mutex_unlock(&file_priv->lock);
+ }
+ mutex_unlock(&vdev->context_list_lock);
+
+ /*
+ * We will not receive new MMU event interrupts until existing events are
+ * discarded; however, we want to discard these events only after aborting
+ * the faulty context to avoid generating new faults from that context
+ */
+ ivpu_mmu_discard_events(vdev);
+
+ if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ ivpu_jsm_hws_resume_engine(vdev, 0);
+ /*
+ * In hardware scheduling mode the NPU has already stopped processing
+ * jobs and won't send us any further notifications; thus we have to
+ * free job-related resources and notify userspace
+ */
+ mutex_lock(&vdev->submitted_jobs_lock);
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ if (job->file_priv->aborted)
+ ivpu_job_signal_and_destroy(vdev, job->job_id, DRM_IVPU_JOB_STATUS_ABORTED);
+ mutex_unlock(&vdev->submitted_jobs_lock);
+}
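Both new ID allocators above are thin wrappers around xa_alloc_cyclic(), which reserves an ID within a limit while advancing a per-owner cursor so recently freed IDs are not immediately reused. A minimal sketch with an illustrative limit of [1, 63]:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(ids);	/* allocating xarray, IDs start at 1 */
static u32 next_id;

/* Allocate a cyclic ID in [1, 63]; the entry may be stored later. */
static int id_alloc(void *entry, u32 *out)
{
	struct xa_limit limit = XA_LIMIT(1, 63);
	u32 id;
	int ret;

	ret = xa_alloc_cyclic(&ids, &id, entry, limit, &next_id, GFP_KERNEL);
	if (ret < 0)		/* -EBUSY: all IDs in the range are in use */
		return ret;

	*out = id;
	return 0;
}

Passing a NULL entry reserves the ID only; the acquire path above stores the real pointer afterwards with xa_store() once the command queue is fully set up.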
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 8b19e3f8b4cf..8c0e1845e5dd 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -66,6 +66,7 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 26ef52fbb93e..b80bdded9fd7 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -20,6 +20,12 @@
#define IVPU_MMU_REG_CR0 0x00200020u
#define IVPU_MMU_REG_CR0ACK 0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK GENMASK(31, 0)
+#define IVPU_MMU_REG_CR0_ATSCHK_MASK BIT(4)
+#define IVPU_MMU_REG_CR0_CMDQEN_MASK BIT(3)
+#define IVPU_MMU_REG_CR0_EVTQEN_MASK BIT(2)
+#define IVPU_MMU_REG_CR0_PRIQEN_MASK BIT(1)
+#define IVPU_MMU_REG_CR0_SMMUEN_MASK BIT(0)
+
#define IVPU_MMU_REG_CR1 0x00200028u
#define IVPU_MMU_REG_CR2 0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL 0x00200050u
@@ -141,12 +147,6 @@
#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
-#define IVPU_MMU_CR0_ATSCHK BIT(4)
-#define IVPU_MMU_CR0_CMDQEN BIT(3)
-#define IVPU_MMU_CR0_EVTQEN BIT(2)
-#define IVPU_MMU_CR0_PRIQEN BIT(1)
-#define IVPU_MMU_CR0_SMMUEN BIT(0)
-
#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
@@ -596,7 +596,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);
- val = IVPU_MMU_CR0_CMDQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, CMDQEN, 0);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -617,12 +617,12 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);
- val |= IVPU_MMU_CR0_EVTQEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
- val |= IVPU_MMU_CR0_ATSCHK;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, ATSCHK, val);
ret = ivpu_mmu_reg_write_cr0(vdev, val);
if (ret)
return ret;
@@ -631,7 +631,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
- val |= IVPU_MMU_CR0_SMMUEN;
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, SMMUEN, val);
return ivpu_mmu_reg_write_cr0(vdev, val);
}
@@ -725,8 +725,8 @@ static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_d
cd[2] = 0;
cd[3] = 0x0000000000007444;
- /* For global context generate memory fault on VPU */
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ /* For global and reserved contexts, generate a memory fault on the VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID || ssid == IVPU_RESERVED_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
if (valid)
@@ -870,28 +870,98 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
return evt;
}
+static int ivpu_mmu_evtq_set(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(IVPU_MMU_REG_CR0);
+
+ if (enable)
+ val = REG_SET_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ else
+ val = REG_CLR_FLD(IVPU_MMU_REG_CR0, EVTQEN, val);
+ REGV_WR32(IVPU_MMU_REG_CR0, val);
+
+ return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
+}
+
+static int ivpu_mmu_evtq_enable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, true);
+}
+
+static int ivpu_mmu_evtq_disable(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_evtq_set(vdev, false);
+}
+
+void ivpu_mmu_discard_events(struct ivpu_device *vdev)
+{
+ /*
+ * Disable event queue (stop MMU from updating the producer)
+ * to allow synchronization of consumer and producer indexes
+ */
+ ivpu_mmu_evtq_disable(vdev);
+
+ vdev->mmu->evtq.cons = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ vdev->mmu->evtq.prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
+
+ ivpu_mmu_evtq_enable(vdev);
+
+ drm_WARN_ON_ONCE(&vdev->drm, vdev->mmu->evtq.cons != vdev->mmu->evtq.prod);
+}
+
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 val;
+
+ if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ val = READ_ONCE(entry[0]);
+ val &= ~IVPU_MMU_CD_0_R;
+ WRITE_ONCE(entry[0], val);
+
+ if (!ivpu_is_force_snoop_enabled(vdev))
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ ivpu_mmu_cmdq_sync(vdev);
+
+ return 0;
+}
+
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
+ struct ivpu_file_priv *file_priv;
u32 *event;
u32 ssid;
ivpu_dbg(vdev, IRQ, "MMU event queue\n");
- while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
- ivpu_mmu_dump_event(vdev, event);
-
- ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
- if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+ while ((event = ivpu_mmu_get_event(vdev))) {
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, *event);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID ||
+ ssid == IVPU_RESERVED_CONTEXT_MMU_SSID) {
+ ivpu_mmu_dump_event(vdev, event);
ivpu_pm_trigger_recovery(vdev, "MMU event");
return;
}
- ivpu_mmu_user_context_mark_invalid(vdev, ssid);
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv) {
+ if (!READ_ONCE(file_priv->has_mmu_faults)) {
+ ivpu_mmu_dump_event(vdev, event);
+ WRITE_ONCE(file_priv->has_mmu_faults, true);
+ }
+ }
}
- if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
- ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ queue_work(system_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
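ivpu_mmu_discard_events() depends on quiescing the producer before touching the indexes: with EVTQEN clear, the hardware can no longer advance PROD, so snapping CONS to PROD is race-free. A compressed model of that sequence; evtq_set(), RD()/WR() and struct vpu are hypothetical stand-ins for the real accessors:

#include <linux/types.h>
#include <linux/bug.h>

struct vpu {
	struct { u32 cons, prod; } evtq;
};

/* hypothetical stand-ins for the SMMU register accessors */
extern void evtq_set(struct vpu *vdev, bool enable);
extern u32 RD(int reg);
extern void WR(int reg, u32 val);
enum { EVTQ_PROD, EVTQ_CONS };

static void discard_events(struct vpu *vdev)
{
	evtq_set(vdev, false);		/* stop HW from moving PROD */

	/* consumer catches up to the now-stable producer */
	vdev->evtq.cons = RD(EVTQ_PROD);
	WR(EVTQ_CONS, vdev->evtq.cons);
	vdev->evtq.prod = RD(EVTQ_PROD);	/* re-read: must not have moved */

	evtq_set(vdev, true);

	WARN_ON_ONCE(vdev->evtq.cons != vdev->evtq.prod);
}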
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
index 7afea9cd8731..1ce7529746ad 100644
--- a/drivers/accel/ivpu/ivpu_mmu.h
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -47,5 +47,7 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
+void ivpu_mmu_discard_events(struct ivpu_device *vdev);
+int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 0af614dfb6f9..f0267efa55aa 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -635,16 +635,3 @@ void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
-
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
-{
- struct ivpu_file_priv *file_priv;
-
- xa_lock(&vdev->context_xa);
-
- file_priv = xa_load(&vdev->context_xa, ssid);
- if (file_priv)
- file_priv->has_mmu_faults = true;
-
- xa_unlock(&vdev->context_xa);
-}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index 8042fc067062..f255310968cf 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -37,8 +37,6 @@ void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
-void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
-
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 5060c5dd40d1..57228e190e7f 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -433,16 +433,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
active_us = (DCT_PERIOD_US * active_percent) / 100;
inactive_us = DCT_PERIOD_US - active_us;
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+
ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = active_percent;
-
- ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
- active_percent, active_us, inactive_us);
return 0;
}
@@ -450,27 +451,29 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev)
{
int ret;
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT requested to be disabled\n");
+
ret = ivpu_jsm_dct_disable(vdev);
if (ret) {
ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret);
return ret;
}
- vdev->pm->dct_active_percent = 0;
-
- ivpu_dbg(vdev, PM, "DCT disabled\n");
return 0;
}
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
{
+ struct ivpu_device *vdev = container_of(work, struct ivpu_device, irq_dct_work);
bool enable;
int ret;
if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
return;
- if (vdev->pm->dct_active_percent)
+ if (enable)
ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
else
ret = ivpu_pm_dct_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index b70efe6c36e4..89b264cc0e3e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -45,6 +45,6 @@ void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
int ivpu_pm_dct_init(struct ivpu_device *vdev);
int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
int ivpu_pm_dct_disable(struct ivpu_device *vdev);
-void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+void ivpu_pm_irq_dct_work_fn(struct work_struct *work);
#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 616477fc17fa..8a616791c32f 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -30,11 +30,12 @@ npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *b
struct ivpu_device *vdev = to_ivpu_device(drm);
ktime_t total, now = 0;
- xa_lock(&vdev->submitted_jobs_xa);
+ mutex_lock(&vdev->submitted_jobs_lock);
+
total = vdev->busy_time;
if (!xa_empty(&vdev->submitted_jobs_xa))
now = ktime_sub(ktime_get(), vdev->busy_start_ts);
- xa_unlock(&vdev->submitted_jobs_xa);
+ mutex_unlock(&vdev->submitted_jobs_lock);
return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 81819b9ef8d4..32f0e81d3e30 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -431,7 +431,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
int bars;
int ret;
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM) & 0x3f;
/* make sure the device has the expected BARs */
if (bars != (BIT(0) | BIT(2) | BIT(4))) {
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d81b55f5068c..7f10aa38269d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -452,7 +452,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
- tristate "Hardware Error Device"
+ bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 01abf26764b0..3f5a1840f573 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -355,8 +355,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INT3F0D"},
{"INTC1080"},
{"INTC1081"},
+ {"INTC1099"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8db09d81918f..3c5f34892734 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
+ /*
+ * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3929
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
+ }
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
+ }
+ },
{ },
};
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 7652515a6be1..3499f86c411e 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
.remove = acpi_hed_remove,
},
};
-module_acpi_driver(acpi_hed_driver);
+
+static int __init acpi_hed_driver_init(void)
+{
+ return acpi_bus_register_driver(&acpi_hed_driver);
+}
+subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index a35dd0e41c27..54676e3d82dd 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -229,18 +229,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
- while ((unsigned long)entry + proc_sz < table_end) {
+ /* ignore subtable types that are smaller than a processor node */
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
+
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
cpu_node->parent == node_entry)
return 0;
if (entry->length == 0)
return 0;
+
entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
entry->length);
-
}
return 1;
}
@@ -270,18 +272,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
- proc_sz = sizeof(struct acpi_pptt_processor *);
+ proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
- while ((unsigned long)entry + proc_sz < table_end) {
+ while ((unsigned long)entry + proc_sz <= table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
if (entry->length == 0) {
pr_warn("Invalid zero length subtable\n");
break;
}
+ /* entry->length may not equal proc_sz, revalidate the processor structure length */
if (entry->type == ACPI_PPTT_TYPE_PROCESSOR &&
acpi_cpu_id == cpu_node->acpi_processor_id &&
+ (unsigned long)entry + entry->length <= table_end &&
+ entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) &&
acpi_pptt_leaf_node(table_hdr, cpu_node)) {
return (struct acpi_pptt_processor *)entry;
}
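The PPTT fix is generic defensive iteration: use sizeof(struct) rather than sizeof(pointer), make sure the fixed header fits before dereferencing, and validate entry->length against the table end before trusting variable-size contents. A runnable userspace model of the same walk:

#include <stdint.h>
#include <stddef.h>

struct subtable {		/* hypothetical, models acpi_subtable_header */
	uint8_t type;
	uint8_t length;
};

/* Walk [base, base + size) without reading past the end. */
static void walk(const uint8_t *base, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct subtable) <= size) {
		const struct subtable *e = (const void *)(base + off);

		if (e->length == 0)		/* malformed: avoid infinite loop */
			return;
		if (off + e->length > size)	/* truncated entry: stop */
			return;
		/* ... inspect the entry of e->type here ... */
		off += e->length;
	}
}

int main(void)
{
	uint8_t table[] = { 0 /* type */, 2 /* length */, 0, 3, 0 };

	walk(table, sizeof(table));	/* both entries fit exactly; loop ends cleanly */
	return 0;
}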
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 76052006bd87..6be0f7ac7213 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -79,6 +79,8 @@ static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
+static DEFINE_SPINLOCK(binder_devices_lock);
+
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);
@@ -5244,6 +5246,7 @@ static void binder_free_proc(struct binder_proc *proc)
__func__, proc->outstanding_txns);
device = container_of(proc->context, struct binder_device, context);
if (refcount_dec_and_test(&device->ref)) {
+ binder_remove_device(device);
kfree(proc->context->name);
kfree(device);
}
@@ -6373,7 +6376,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd offset %lx\n",
buffer->data_size, buffer->offsets_size,
- proc->alloc.vm_start - buffer->user_data);
+ buffer->user_data - proc->alloc.vm_start);
}
static void print_binder_work_ilocked(struct seq_file *m,
@@ -6929,7 +6932,16 @@ const struct binder_debugfs_entry binder_debugfs_entries[] = {
void binder_add_device(struct binder_device *device)
{
+ spin_lock(&binder_devices_lock);
hlist_add_head(&device->hlist, &binder_devices);
+ spin_unlock(&binder_devices_lock);
+}
+
+void binder_remove_device(struct binder_device *device)
+{
+ spin_lock(&binder_devices_lock);
+ hlist_del_init(&device->hlist);
+ spin_unlock(&binder_devices_lock);
}
static int __init init_binder_device(const char *name)
@@ -6956,7 +6968,7 @@ static int __init init_binder_device(const char *name)
return ret;
}
- hlist_add_head(&binder_device->hlist, &binder_devices);
+ binder_add_device(binder_device);
return ret;
}
@@ -7018,7 +7030,7 @@ static int __init binder_init(void)
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
- hlist_del(&device->hlist);
+ binder_remove_device(device);
kfree(device);
}
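The binder device list is now a textbook spinlock-protected hlist: add and remove funnel through two helpers so every mutation of the global list is covered by the lock. A minimal sketch of the same shape:

#include <linux/list.h>
#include <linux/spinlock.h>

static HLIST_HEAD(devices);
static DEFINE_SPINLOCK(devices_lock);

struct my_device {
	struct hlist_node hlist;
};

static void my_add_device(struct my_device *d)
{
	spin_lock(&devices_lock);
	hlist_add_head(&d->hlist, &devices);
	spin_unlock(&devices_lock);
}

static void my_remove_device(struct my_device *d)
{
	spin_lock(&devices_lock);
	hlist_del_init(&d->hlist);	/* _init: a second removal is a no-op */
	spin_unlock(&devices_lock);
}

hlist_del_init() leaves the node pointing at itself, so removing twice is harmless; that matters here because both binder_free_proc() and the binderfs inode eviction below can drop the last device reference.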
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index e4eb8357989c..c5d68c1d3780 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -584,9 +584,13 @@ struct binder_object {
/**
* Add a binder device to binder_devices
* @device: the new binder device to add to the global list
- *
- * Not reentrant as the list is not protected by any locks
*/
void binder_add_device(struct binder_device *device);
+/**
+ * Remove a binder device from binder_devices
+ * @device: the binder device to remove from the global list
+ */
+void binder_remove_device(struct binder_device *device);
+
#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 94c6446604fc..44d430c4ebef 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -274,7 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
- hlist_del_init(&device->hlist);
+ binder_remove_device(device);
kfree(device->context.name);
kfree(device);
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2796c0da8257..c0eb8c67a9ff 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2453,8 +2453,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
- if (dev->flags & ATA_DFLAG_CDL)
- buf[4] = 0x02; /* Support T2A and T2B pages */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -3886,12 +3886,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
- * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
-static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
- const u8 *buf, int len,
- u16 *fp)
+static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3909,17 +3908,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL */
+ /* Disable CDL if it is enabled */
+ if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
+ return 0;
+ ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
- /* Enable CDL T2A/T2B: NCQ priority must be disabled */
+ /*
+ * Enable CDL if not already enabled. Since this is mutually
+ * exclusive with NCQ priority, allow this only if NCQ priority
+ * is disabled.
+ */
+ if (dev->flags & ATA_DFLAG_CDL_ENABLED)
+ return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
+ ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 19b619376d48..09020bb8ad15 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
-struct charlcd *charlcd_alloc(void)
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
+ lcd->drvdata = priv->drvdata;
return lcd;
}
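charlcd_alloc() now sizes the private struct with a caller-supplied trailer, so a driver's per-device data lives in the same kzalloc() as the core state and is freed together with it. A minimal sketch of the flexible-trailer pattern, with an illustrative struct name:

#include <linux/slab.h>

struct lcd_priv {
	int esc_len;			/* core state */
	unsigned long drvdata[];	/* trailing per-driver area */
};

static struct lcd_priv *lcd_alloc(unsigned int drvdata_size)
{
	struct lcd_priv *priv;

	/* one allocation covers core state plus the driver's private data */
	priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
	if (!priv)
		return NULL;

	/* callers reach their data via priv->drvdata; freed with priv */
	return priv;
}

Drivers that need no private data simply pass 0, as the hd44780, lcd2s and panel call sites below do.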
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index 4d4287209d04..d10b89740bca 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -51,7 +51,7 @@ struct charlcd {
unsigned long y;
} addr;
- void *drvdata;
+ void *drvdata; /* Set by charlcd_alloc() */
};
/**
@@ -95,7 +95,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
-struct charlcd *charlcd_alloc(void);
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 9d0ae9c02e9b..1d67fe324341 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev)
if (!hdc)
return -ENOMEM;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(0);
if (!lcd)
goto fail1;
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index a28daa4ffbf7..c71ebb925971 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err < 0)
return err;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(0);
if (!lcd)
return -ENOMEM;
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 6dc8798d01f9..4da142692d55 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -835,7 +835,7 @@ static void lcd_init(void)
if (!hdc)
return;
- charlcd = charlcd_alloc();
+ charlcd = charlcd_alloc(0);
if (!charlcd) {
kfree(hdc);
return;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0042e4774b0c..123031a757d9 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -73,6 +73,7 @@ static inline void subsys_put(struct subsys_private *sp)
kset_put(&sp->subsys);
}
+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
struct subsys_private *class_to_subsys(const struct class *class);
struct driver_private {
@@ -180,6 +181,22 @@ int driver_add_groups(const struct device_driver *drv, const struct attribute_gr
void driver_remove_groups(const struct device_driver *drv, const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+ /*
+ * The majority of (all?) read accesses to dev->driver happen either
+ * while holding the device lock or in bus/driver code that is only
+ * invoked when the device is bound to a driver, so there is no
+ * concern of the pointer being changed while it is being read.
+ * However, when reading a device's uevent file we read the driver
+ * pointer without taking the device lock (so we do not block there
+ * for an arbitrary amount of time). We use WRITE_ONCE() here to
+ * prevent tearing so that READ_ONCE() can safely be used in uevent code.
+ */
+ // FIXME - this cast should not be needed "soon"
+ WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
int devres_release_all(struct device *dev);
void device_block_probing(void);
void device_unblock_probing(void);
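device_set_driver() exists so that every writer of dev->driver goes through WRITE_ONCE(), pairing with the READ_ONCE() in the lockless uevent reader added below. A minimal sketch of the pattern:

#include <linux/compiler.h>

struct obj {
	const char *name;
};

static struct obj *current_obj;

/* writer: may run while a lockless reader is looking */
static void set_obj(struct obj *o)
{
	WRITE_ONCE(current_obj, o);	/* no store tearing */
}

/* reader: snapshot the pointer once, then use only the snapshot */
static const char *obj_name(void)
{
	struct obj *o = READ_ONCE(current_obj);

	return o ? o->name : "(none)";
}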
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 6b9e65a42cd2..c8c7e0804024 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -57,7 +57,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
* NULL. A call to subsys_put() must be done when finished with the pointer in
* order for it to be properly freed.
*/
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
struct subsys_private *sp = NULL;
struct kobject *kobj;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2fde698430df..93019bb6998e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2624,6 +2624,35 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
+/*
+ * Try filling "DRIVER=<name>" uevent variable for a device. Because this
+ * function may race with binding and unbinding the device from a driver,
+ * we need to be careful. Binding is generally safe, at worst we miss the
+ * fact that the device is already bound to a driver (but the driver
+ * information that is delivered through uevents is best-effort, it may
+ * become obsolete as soon as it is generated anyway). Unbinding is more
+ * risky as the driver pointer is transitioning to NULL, so READ_ONCE()
+ * should be used to make sure we are dealing with the same pointer, and
+ * to ensure that the driver structure is not going to disappear from
+ * under us we take the bus' drivers klist lock. The assumption is that
+ * only a registered driver can be bound to a device, and that the bus
+ * code takes the same lock to unregister a driver.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (sp) {
+ scoped_guard(spinlock, &sp->klist_drivers.k_lock) {
+ struct device_driver *drv = READ_ONCE(dev->driver);
+ if (drv)
+ add_uevent_var(env, "DRIVER=%s", drv->name);
+ }
+
+ subsys_put(sp);
+ }
+}
+
static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
const struct device *dev = kobj_to_dev(kobj);
@@ -2655,8 +2684,8 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
if (dev->type && dev->type->name)
add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
- if (dev->driver)
- add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add "DRIVER=%s" variable if the device is bound to a driver */
+ dev_driver_uevent(dev, env);
/* Add common DT information about the device */
of_device_uevent(dev, env);
@@ -2726,11 +2755,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
- /* Synchronize with really_probe() */
- device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
- device_unlock(dev);
if (retval)
goto out;
@@ -3700,7 +3726,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
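dev_driver_uevent() above (like the guard(mutex) in ivpu_job_signal_and_destroy() earlier) uses the <linux/cleanup.h> guards, which drop the lock automatically on every exit path of the scope. A minimal sketch of the two forms:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static int shared;

static int read_shared(void)
{
	/* lock held only for the statement block */
	scoped_guard(spinlock, &my_lock) {
		if (shared >= 0)
			return shared;	/* lock dropped automatically */
	}
	return -EINVAL;
}

static void update_shared(int v)
{
	guard(spinlock)(&my_lock);	/* held until function return */
	shared = v;
}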
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index a7e511849875..50651435577c 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -616,6 +617,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -633,6 +635,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_ghostwrite.attr,
+ &dev_attr_indirect_target_selection.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index f0e4b4aba885..b526e0e0f52d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -550,7 +550,7 @@ static void device_unbind_cleanup(struct device *dev)
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
@@ -629,8 +629,7 @@ static int really_probe(struct device *dev, const struct device_driver *drv)
}
re_probe:
- // FIXME - this cast should not be needed "soon"
- dev->driver = (struct device_driver *)drv;
+ device_set_driver(dev, drv);
/* If using pinctrl, bind pins now before probing */
ret = pinctrl_bind_pins(dev);
@@ -1014,7 +1013,7 @@ static int __device_attach(struct device *dev, bool allow_async)
if (ret == 0)
ret = 1;
else {
- dev->driver = NULL;
+ device_set_driver(dev, NULL);
ret = 0;
}
} else {
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 531e9d789ee0..407c1d1aad50 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -102,7 +102,9 @@ static void faux_device_release(struct device *dev)
*
* Note, when this function is called, the functions specified in struct
* faux_ops can be called before the function returns, so be prepared for
- * everything to be properly initialized before that point in time.
+ * everything to be properly initialized before that point in time. If the
+ * probe callback (if one is present) does NOT succeed, the creation of the
+ * device will fail and NULL will be returned.
*
* Return:
* * NULL if an error happened with creating the device
@@ -147,6 +149,17 @@ struct faux_device *faux_device_create_with_groups(const char *name,
return NULL;
}
+ /*
+ * Verify that the driver was bound to the device (i.e. probe worked);
+ * if not, fail the creation, as it is almost impossible for the caller
+ * to determine whether probe succeeded.
+ */
+ if (!dev->driver) {
+ dev_err(dev, "probe did not succeed, tearing down the device\n");
+ faux_device_destroy(faux_dev);
+ faux_dev = NULL;
+ }
+
return faux_dev;
}
EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 5bc71bea883a..218aaa096455 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(module_kset, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
+ /* Look up or create the built-in module entry in /sys/module */
+ mk = lookup_or_create_module_kobject(drv->mod_name);
+ if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
+ /* lookup_or_create_module_kobject took a reference */
+ kobject_put(&mk->kobj);
}
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 23be2d1b0407..37fe251b4c59 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -933,6 +933,13 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
if (dev->power.direct_complete) {
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7668b79d8b0a..0b135d1ca25e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -205,8 +205,6 @@ static bool lo_can_use_dio(struct loop_device *lo)
*/
static inline void loop_update_dio(struct loop_device *lo)
{
- bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
-
lockdep_assert_held(&lo->lo_mutex);
WARN_ON_ONCE(lo->lo_state == Lo_bound &&
lo->lo_queue->mq_freeze_depth == 0);
@@ -215,10 +213,6 @@ static inline void loop_update_dio(struct loop_device *lo)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
-
- /* flush dirty pages before starting to issue direct I/O */
- if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
- vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -496,6 +490,25 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return 0;
}
+static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+{
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+}
+
+static int loop_check_backing_file(struct file *file)
+{
+ if (!file->f_op->read_iter)
+ return -EINVAL;
+
+ if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -517,6 +530,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
return -EBADF;
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -545,14 +562,18 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_err;
+ /*
+ * We might switch to direct I/O mode for the loop device, so write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
/* and ... switch */
disk_force_media_change(lo->lo_disk);
memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
- mapping_set_gfp_mask(file->f_mapping,
- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
@@ -899,7 +920,7 @@ static unsigned int loop_default_blocksize(struct loop_device *lo,
struct block_device *backing_bdev)
{
/* In case of direct I/O, match underlying block size */
- if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && backing_bdev)
return bdev_logical_block_size(backing_bdev);
return SECTOR_SIZE;
}
@@ -943,7 +964,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct address_space *mapping;
struct queue_limits lim;
int error;
loff_t size;
@@ -952,6 +972,11 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (!file)
return -EBADF;
+
+ error = loop_check_backing_file(file);
+ if (error)
+ return error;
+
is_loop = is_loop_device(file);
/* This is safe, since we have a reference from open(). */
@@ -979,8 +1004,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
- mapping = file->f_mapping;
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1012,9 +1035,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->lo_device = bdev;
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
lim = queue_limits_start_update(lo->lo_queue);
loop_update_limits(lo, &lim, config->block_size);
@@ -1023,6 +1044,13 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
+ /*
+ * We might switch to direct I/O mode for the loop device, so write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
loop_update_dio(lo);
loop_sysfs_init(lo);
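Two behaviors change together in this file: backing files are validated for ->read_iter (and ->write_iter on writable opens) before any device state is touched, and the one-time vfs_fsync() moves out of loop_update_dio() into the configure/change paths. A condensed, non-literal sketch of the resulting call order in loop_configure(), using the helper names from the patch:

static int configure(struct loop_device *lo, struct file *file)
{
	int err;

	err = loop_check_backing_file(file);	/* reject files lacking ->read_iter */
	if (err)
		return err;

	loop_assign_backing_file(lo, file);	/* sets backing file and GFP mask */

	/*
	 * Flush dirty pages once, up front: loop_update_dio() may flip the
	 * device into direct I/O mode, and dio bypasses the page cache.
	 */
	vfs_fsync(file, 0);
	loop_update_dio(lo);
	return 0;
}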
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 175566a71bb3..41c2cd5818b4 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -592,41 +592,41 @@ static ssize_t nullb_device_zone_offline_store(struct config_item *item,
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
- &nullb_device_attr_size,
+ &nullb_device_attr_badblocks,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_cache_size,
&nullb_device_attr_completion_nsec,
- &nullb_device_attr_submit_queues,
- &nullb_device_attr_poll_queues,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_fua,
&nullb_device_attr_home_node,
- &nullb_device_attr_queue_mode,
- &nullb_device_attr_blocksize,
- &nullb_device_attr_max_sectors,
- &nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_blocking,
- &nullb_device_attr_use_per_node_hctx,
- &nullb_device_attr_power,
- &nullb_device_attr_memory_backed,
- &nullb_device_attr_discard,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_mbps,
- &nullb_device_attr_cache_size,
- &nullb_device_attr_badblocks,
- &nullb_device_attr_zoned,
- &nullb_device_attr_zone_size,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_poll_queues,
+ &nullb_device_attr_power,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_rotational,
+ &nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_shared_tags,
+ &nullb_device_attr_size,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_virt_boundary,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_capacity,
- &nullb_device_attr_zone_nr_conv,
- &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_zone_max_active,
- &nullb_device_attr_zone_append_max_sectors,
- &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_offline,
- &nullb_device_attr_zone_full,
- &nullb_device_attr_virt_boundary,
- &nullb_device_attr_no_sched,
- &nullb_device_attr_shared_tags,
- &nullb_device_attr_shared_tag_bitmap,
- &nullb_device_attr_fua,
- &nullb_device_attr_rotational,
+ &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_size,
+ &nullb_device_attr_zoned,
NULL,
};
@@ -704,16 +704,28 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,fua,"
- "completion_nsec,discard,home_node,hw_queue_depth,"
- "irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,"
- "shared_tags,size,submit_queues,use_per_node_hctx,"
- "virt_boundary,zoned,zone_capacity,zone_max_active,"
- "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full,"
- "rotational\n");
+ struct configfs_attribute **entry;
+ char delimiter = ',';
+ size_t left = PAGE_SIZE;
+ size_t written = 0;
+ int ret;
+
+ for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) {
+ if (!*(entry + 1))
+ delimiter = '\n';
+ ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name,
+ delimiter);
+ if (ret >= left) {
+ WARN_ONCE(1, "Too many null_blk features to print\n");
+ memzero_explicit(page, PAGE_SIZE);
+ return -ENOBUFS;
+ }
+ left -= ret;
+ written += ret;
+ }
+
+ return written;
}
CONFIGFS_ATTR_RO(memb_group_, features);
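Since the features string is now generated from the same (alphabetically sorted) attribute array, the two can no longer drift apart. A hypothetical userspace check, assuming configfs is mounted at /sys/kernel/config:

#include <stdio.h>

int main(void)
{
	char buf[4096];
	/* the features attribute sits on the nullb configfs root */
	FILE *f = fopen("/sys/kernel/config/nullb/features", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "badblocks,blocking,..." */
	fclose(f);
	return 0;
}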
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 971b793dedd0..08694d7d18db 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -73,12 +73,24 @@
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
struct ublk_rq_data {
- struct llist_node node;
-
struct kref ref;
};
struct ublk_uring_cmd_pdu {
+ /*
+ * Temporarily store the requests of one batch for queueing them
+ * to the daemon context.
+ *
+ * This could have lived in the request payload, but we want to
+ * avoid the extra pre-allocation, and the uring_cmd payload is
+ * always free for us
+ */
+ struct request *req_list;
+
+ /*
+ * The following two fields are valid for this cmd's whole lifetime,
+ * and are set up in the ublk uring_cmd handler
+ */
struct ublk_queue *ubq;
u16 tag;
};
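The pdu lives in the uring_cmd's small inline payload area, so no per-batch allocation is needed. A sketch of the accessor this relies on, assuming the generic io_uring_cmd_to_pdu() helper (the driver may open-code the cast instead):

static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
		struct io_uring_cmd *ioucmd)
{
	/* build-time checked to fit within io_uring_cmd's pdu[] bytes */
	return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}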
@@ -104,15 +116,6 @@ struct ublk_uring_cmd_pdu {
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
-/*
* UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
* get data buffer address from ublksrv.
*
@@ -141,8 +144,6 @@ struct ublk_queue {
struct task_struct *ubq_daemon;
char *io_cmd_buf;
- struct llist_head io_cmds;
-
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
@@ -184,8 +185,6 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
-
- struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -194,7 +193,9 @@ struct ublk_params_header {
__u32 types;
};
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
+
+static void ublk_stop_dev_unlocked(struct ublk_device *ub);
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
@@ -489,15 +490,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+
+#define UBLK_MAX_UBLKS UBLK_MINORS
+
/*
- * Max ublk devices allowed to add
+ * Max number of unprivileged ublk devices allowed to be added
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
-#define UBLK_MAX_UBLKS UBLK_MINORS
-static unsigned int ublks_max = 64;
-static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+static unsigned int unprivileged_ublks_max = 64;
+static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -584,6 +587,11 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
+{
+ return !ublk_support_user_copy(ubq);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -911,7 +919,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
/*
@@ -935,7 +943,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1028,7 +1036,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
- return ubq->ubq_daemon->flags & PF_EXITING;
+ return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
}
/* todo: handle partial completion */
@@ -1039,12 +1047,6 @@ static inline void __ublk_complete_rq(struct request *req)
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
- /* called from ublk_abort_queue() code path */
- if (io->flags & UBLK_IO_FLAG_ABORTED) {
- res = BLK_STS_IOERR;
- goto exit;
- }
-
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -1094,47 +1096,6 @@ static void ublk_complete_rq(struct kref *ref)
__ublk_complete_rq(req);
}
-static void ublk_do_fail_rq(struct request *req)
-{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
-
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
- blk_mq_requeue_request(req, false);
- else
- __ublk_complete_rq(req);
-}
-
-static void ublk_fail_rq_fn(struct kref *ref)
-{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
-
- ublk_do_fail_rq(req);
-}
-
-/*
- * Since __ublk_rq_task_work always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
-{
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- kref_put(&data->ref, ublk_fail_rq_fn);
- } else {
- ublk_do_fail_rq(req);
- }
-}
-
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
unsigned issue_flags)
{
@@ -1163,10 +1124,10 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq,
+ struct request *req,
+ unsigned int issue_flags)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1242,41 +1203,55 @@ static inline void __ublk_rq_task_work(struct request *req,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
-{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
-
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
+ int tag = pdu->tag;
+ struct request *req = blk_mq_tag_to_rq(
+ ubq->dev->tag_set.tags[ubq->q_id], tag);
- ublk_forward_io_cmds(ubq, issue_flags);
+ ublk_dispatch_req(ubq, req, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+ struct ublk_io *io = &ubq->ios[rq->tag];
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+ io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+}
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct request *next;
+
+ while (rq) {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(ubq, rq, issue_flags);
+ rq = next;
}
}
+static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
+{
+ struct request *rq = rq_list_peek(l);
+ struct ublk_io *io = &ubq->ios[rq->tag];
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
+
+ pdu->req_list = rq;
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(io->cmd, ublk_cmd_list_tw_cb);
+}
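Only the first request's uring_cmd carries the whole batch, so a single task-work item dispatches N requests. A rough model of the rq_list helpers relied on above (an assumption; the real definitions live in the block layer headers):

/* the batch is a singly linked chain through rq->rq_next,
 * tracked by head/tail pointers */
static struct request *batch_detach(struct rq_list *l)
{
	struct request *head = rq_list_peek(l);	/* first request, or NULL */

	rq_list_init(l);	/* empty the list; the detached chain stays
				 * walkable via rq->rq_next until NULL */
	return head;
}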
+
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- unsigned int nr_inflight = 0;
- int i;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
if (!ubq->timeout) {
@@ -1287,44 +1262,16 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_DONE;
}
- if (!ubq_daemon_is_dying(ubq))
- return BLK_EH_RESET_TIMER;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
- nr_inflight++;
- }
-
- /* cancelable uring_cmd can't help us if all commands are in-flight */
- if (nr_inflight == ubq->q_depth) {
- struct ublk_device *ub = ubq->dev;
-
- if (ublk_abort_requests(ub, ubq)) {
- schedule_work(&ub->nosrv_work);
- }
- return BLK_EH_DONE;
- }
-
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
+ bool check_cancel)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- if (unlikely(ubq->fail_io)) {
+ if (unlikely(ubq->fail_io))
return BLK_STS_TARGET;
- }
-
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1338,17 +1285,68 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
+ if (check_cancel && unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq, false);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+ * ->canceling has to be handled after ->force_abort and ->fail_io
+ * are dealt with, otherwise this request may not be failed during
+ * recovery, causing a hang when deleting the disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_queue *ubq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+
+ if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ ubq = this_q;
+
+ if (ublk_prep_req(ubq, req, true) == BLK_STS_OK)
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
+ }
+
+ if (ubq && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ *rqlist = requeue_list;
+}
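The batch is flushed whenever the hardware queue changes, preserving per-queue ordering while still amortizing task-work cost. A worked trace with hypothetical queues and tags:

/*
 * rqlist = q0:t0, q0:t1, q1:t2
 *
 *   pop q0:t0 -> submit_list = {t0}
 *   pop q0:t1 -> same queue  -> submit_list = {t0, t1}
 *   pop q1:t2 -> queue changed: flush {t0, t1} to q0's daemon,
 *                then submit_list = {t2}
 *   end       -> flush {t2} to q1's daemon
 *
 * Requests failing ublk_prep_req() collect on requeue_list, which is
 * handed back to the block layer through *rqlist.
 */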
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1361,10 +1359,42 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ /* All old ioucmds have to be completed */
+ ubq->nr_io_ready = 0;
+
+ /*
+ * The old daemon is PF_EXITING, so put it now
+ *
+ * It could be NULL when closing a quiesced device.
+ */
+ if (ubq->ubq_daemon)
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+ ubq->ubq_daemon = NULL;
+ ubq->timeout = false;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /*
+ * UBLK_IO_FLAG_CANCELED is kept to avoid touching io->cmd
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+ io->addr = 0;
+ }
+}
+
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1376,10 +1406,119 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
return 0;
}
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+ /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_queues_ready = 0;
+ ub->nr_privileged_daemon = 0;
+}
+
+static struct gendisk *ublk_get_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ if (disk)
+ get_device(disk_to_dev(disk));
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_put_disk(struct gendisk *disk)
+{
+ if (disk)
+ put_device(disk_to_dev(disk));
+}
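The pair above pins the gendisk against concurrent teardown under ub->lock. The typical calling pattern, mirroring ublk_start_cancel() further down:

static void example_with_pinned_disk(struct ublk_device *ub)
{
	struct gendisk *disk = ublk_get_disk(ub);

	if (!disk)	/* disk already detached or removed */
		return;

	blk_mq_quiesce_queue(disk->queue);
	/* ... operate on the queue; the disk can't go away here ... */
	blk_mq_unquiesce_queue(disk->queue);

	ublk_put_disk(disk);
}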
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
+ struct gendisk *disk;
+ int i;
+
+ /*
+ * The disk isn't attached: either the device isn't live yet, or it
+ * has been removed already, so we need not do anything
+ */
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto out;
+ /*
+ * All uring_cmds are done now, so abort any requests outstanding to
+ * the ublk server
+ *
+ * This can be done locklessly because the ublk server is gone
+ *
+ * More importantly, we have to guarantee forward progress without
+ * holding ub->mutex, otherwise a control task grabbing ub->mutex
+ * would deadlock
+ *
+ * All requests may be inflight, so ->canceling may not be set yet;
+ * set it now.
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ ubq->canceling = true;
+ ublk_abort_queue(ub, ubq);
+ }
+ blk_mq_kick_requeue_list(disk->queue);
+
+ /*
+ * All inflight requests have been completed or requeued, and any new
+ * request will be failed or requeued via `->canceling`, so it is
+ * fine to grab ub->mutex now.
+ */
+ mutex_lock(&ub->mutex);
+
+ /* double check after grabbing lock */
+ if (!ub->ub_disk)
+ goto unlock;
+
+ /*
+ * Transition the device to the nosrv state. What exactly this
+ * means depends on the recovery flags
+ */
+ blk_mq_quiesce_queue(disk->queue);
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ /*
+ * Allow any pending/future I/O to pass through quickly
+ * with an error. This is needed because del_gendisk
+ * waits for all pending I/O to complete
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->force_abort = true;
+ blk_mq_unquiesce_queue(disk->queue);
+
+ ublk_stop_dev_unlocked(ub);
+ } else {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ /* ->canceling is set and all requests are aborted */
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ } else {
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+ }
+unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_disk(disk);
+
+ /* all uring_cmds are done now, reset the device & ubqs */
+ ublk_reset_ch_dev(ub);
+out:
clear_bit(UB_STATE_OPEN, &ub->state);
return 0;
}
@@ -1446,10 +1585,26 @@ static void ublk_commit_completion(struct ublk_device *ub,
ublk_put_req_ref(ubq, req);
}
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
+{
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ blk_mq_requeue_request(req, false);
+ else {
+ io->res = -EIO;
+ __ublk_complete_rq(req);
+ }
+}
+
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from the ublk char device release handler, once all uring_cmds
+ * are done; meanwhile the request queue is "quiesced" since all inflight
+ * requests can't be completed because the ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more; simply ignore
+ * the reference, and complete the request immediately
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -1462,50 +1617,33 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct request *rq;
/*
- * Either we fail the request or ublk_rq_task_work_fn
+ * Either we fail the request or ublk_rq_task_work_cb
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq)) {
- io->flags |= UBLK_IO_FLAG_ABORTED;
+ if (rq && blk_mq_request_started(rq))
__ublk_fail_req(ubq, io, rq);
- }
}
}
}
/* Must be called when queue is frozen */
-static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
+static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
{
- bool canceled;
-
spin_lock(&ubq->cancel_lock);
- canceled = ubq->canceling;
- if (!canceled)
+ if (!ubq->canceling)
ubq->canceling = true;
spin_unlock(&ubq->cancel_lock);
-
- return canceled;
}
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_queue *ubq)
{
- bool was_canceled = ubq->canceling;
- struct gendisk *disk;
-
- if (was_canceled)
- return false;
-
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- if (disk)
- get_device(disk_to_dev(disk));
- spin_unlock(&ub->lock);
+ struct ublk_device *ub = ubq->dev;
+ struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
- return false;
-
+ return;
/*
* Now we are serialized with ublk_queue_rq()
*
@@ -1514,25 +1652,36 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
- was_canceled = ublk_mark_queue_canceling(ubq);
- if (!was_canceled) {
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
- }
+ ublk_mark_queue_canceling(ubq);
blk_mq_unquiesce_queue(disk->queue);
- put_device(disk_to_dev(disk));
-
- return !was_canceled;
+ ublk_put_disk(disk);
}
-static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
+static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
unsigned int issue_flags)
{
+ struct ublk_io *io = &ubq->ios[tag];
+ struct ublk_device *ub = ubq->dev;
+ struct request *req;
bool done;
if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
return;
+ /*
+ * Don't try to cancel this command if the request is started, to
+ * avoid a race between io_uring_cmd_done() and
+ * io_uring_cmd_complete_in_task().
+ *
+ * Either the started request will be aborted via __ublk_abort_rq(),
+ * then this uring_cmd is canceled next time, or it will be done in
+ * task work function ublk_dispatch_req() because io_uring guarantees
+ * that ublk_dispatch_req() is always called
+ */
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ if (req && blk_mq_request_started(req) && req->tag == tag)
+ return;
+
spin_lock(&ubq->cancel_lock);
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
if (!done)
@@ -1546,6 +1695,17 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
/*
* The ublk char device won't be closed when calling cancel fn, so both
* ublk device and queue are guaranteed to be live
+ *
+ * Two-stage cancel:
+ *
+ * - make every active uring_cmd done in ->cancel_fn()
+ *
+ * - abort inflight ublk IO requests in the ublk char device release
+ * handler, which depends on the first stage because the device can only
+ * be closed once all uring_cmds are done
+ *
+ * Do _not_ try to acquire ub->mutex before all inflight requests are
+ * aborted, otherwise deadlock may be caused.
*/
static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
unsigned int issue_flags)
@@ -1553,9 +1713,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
- struct ublk_device *ub;
- bool need_schedule;
- struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
return;
@@ -1567,16 +1724,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
return;
- ub = ubq->dev;
- need_schedule = ublk_abort_requests(ub, ubq);
-
- io = &ubq->ios[pdu->tag];
- WARN_ON_ONCE(io->cmd != cmd);
- ublk_cancel_cmd(ubq, io, issue_flags);
+ if (!ubq->canceling)
+ ublk_start_cancel(ubq);
- if (need_schedule) {
- schedule_work(&ub->nosrv_work);
- }
+ WARN_ON_ONCE(ubq->ios[pdu->tag].cmd != cmd);
+ ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
@@ -1589,7 +1741,7 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
int i;
for (i = 0; i < ubq->q_depth; i++)
- ublk_cancel_cmd(ubq, &ubq->ios[i], IO_URING_F_UNLOCKED);
+ ublk_cancel_cmd(ubq, i, IO_URING_F_UNLOCKED);
}
/* Cancel all pending commands, must be called after del_gendisk() returns */
@@ -1627,33 +1779,20 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
}
}
-static void __ublk_quiesce_dev(struct ublk_device *ub)
-{
- pr_devel("%s: quiesce ub: dev_id %d state %s\n",
- __func__, ub->dev_info.dev_id,
- ub->dev_info.state == UBLK_S_DEV_LIVE ?
- "LIVE" : "QUIESCED");
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ublk_wait_tagset_rqs_idle(ub);
- ub->dev_info.state = UBLK_S_DEV_QUIESCED;
-}
-
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
@@ -1674,61 +1813,51 @@ static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
return disk;
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static void ublk_stop_dev_unlocked(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
{
struct gendisk *disk;
- mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
- goto unlock;
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
+ return;
+
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
del_gendisk(ub->ub_disk);
disk = ublk_detach_disk(ub);
put_disk(disk);
- unlock:
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
-static void ublk_nosrv_work(struct work_struct *work)
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, nosrv_work);
- int i;
+ int i, j;
- if (ublk_nosrv_should_stop_dev(ub)) {
- ublk_stop_dev(ub);
- return;
- }
-
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- __ublk_quiesce_dev(ub);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = true;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ for (j = 0; j < ubq->q_depth; j++)
+ ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+ ubq->canceling = false;
+ ubq->fail_io = false;
}
-
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
}
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+ __must_hold(&ub->mutex)
{
- mutex_lock(&ub->mutex);
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
ubq->ubq_daemon = current;
@@ -1738,18 +1867,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
- complete_all(&ub->completion);
- mutex_unlock(&ub->mutex);
-}
-
-static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
- int tag)
-{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
- ublk_queue_cmd(ubq, req);
+ if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ /* now we are ready for handling ublk io requests */
+ ublk_reset_io_flags(ub);
+ complete_all(&ub->completion);
+ }
}
static inline int ublk_check_cmd_op(u32 cmd_op)
@@ -1788,6 +1911,52 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ struct ublk_device *ub = ubq->dev;
+ int ret = 0;
+
+ /*
+ * When handling the FETCH command for setting up the ublk uring queue,
+ * ub->mutex is the innermost lock, and we won't block while handling
+ * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ /* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
+ if (ublk_queue_ready(ubq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_need_get_data(ubq))
+ goto out;
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ublk_fill_io_cmd(io, cmd, buf_addr);
+ ublk_mark_io_ready(ub, ubq);
+out:
+ mutex_unlock(&ub->mutex);
+ return ret;
+}
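The buffer-address rules ublk_fetch() enforces, summarized (a sketch of the protocol, not new driver code):

/*
 * ublk_need_map_io(), i.e. no UBLK_F_USER_COPY:
 *     addr must be provided, unless UBLK_F_NEED_GET_DATA defers the
 *     buffer to a later UBLK_IO_NEED_GET_DATA round trip
 *
 * user copy enabled:
 *     addr must be zero; data moves via pread()/pwrite() on the ublk
 *     char device instead
 */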
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -1840,33 +2009,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_FETCH_REQ:
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * The io is being handled by server, so COMMIT_RQ is expected
- * instead of FETCH_REQ
- */
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- goto out;
-
- if (!ublk_support_user_copy(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (ub_cmd->addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
+ ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
@@ -1874,7 +2019,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
- if (!ublk_support_user_copy(ubq)) {
+ if (ublk_need_map_io(ubq)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
@@ -1898,8 +2043,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
- break;
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+ ublk_dispatch_req(ubq, req, issue_flags);
+ return -EIOCBQUEUED;
default:
goto out;
}
@@ -1907,10 +2053,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
- return -EIOCBQUEUED;
+ return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
@@ -1966,7 +2111,10 @@ static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- ublk_ch_uring_cmd_local(cmd, issue_flags);
+ int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
+
+ if (ret != -EIOCBQUEUED)
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2231,7 +2379,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
if (ret)
goto fail;
- ublks_added++;
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ unprivileged_ublks_added++;
return 0;
fail:
put_device(dev);
@@ -2260,11 +2409,15 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
+ bool unprivileged;
+
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
- ublks_added--;
+
+ if (unprivileged)
+ unprivileged_ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -2526,7 +2679,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return ret;
ret = -EACCES;
- if (ublks_added >= ublks_max)
+ if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
+ unprivileged_ublks_added >= unprivileged_ublks_max)
goto out_unlock;
ret = -ENOMEM;
@@ -2535,7 +2689,6 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2670,7 +2823,6 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
return 0;
}
@@ -2777,43 +2929,15 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
return ret;
}
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
- int i;
-
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
- ubq->canceling = false;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- /* forget everything now and be ready for new FETCH_REQ */
- io->flags = 0;
- io->cmd = NULL;
- io->addr = 0;
- }
-}
-
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ret = -EINVAL;
- int i;
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (!ub->nr_queues_ready)
- goto out_unlock;
/*
 * START_RECOVERY is only allowed after:
*
@@ -2837,12 +2961,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
- ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
@@ -2856,7 +2974,6 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2876,24 +2993,10 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
-
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = false;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- }
-
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
@@ -3100,10 +3203,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
- return -EIOCBQUEUED;
+ return ret;
}
static const struct file_operations ublk_ctl_fops = {
@@ -3167,23 +3269,26 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
+static int ublk_set_max_unprivileged_ublks(const char *buf,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
+static int ublk_get_max_unprivileged_ublks(char *buf,
+ const struct kernel_param *kp)
{
- return sysfs_emit(buf, "%u\n", ublks_max);
+ return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
}
-static const struct kernel_param_ops ublk_max_ublks_ops = {
- .set = ublk_set_max_ublks,
- .get = ublk_get_max_ublks,
+static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
+ .set = ublk_set_max_unprivileged_ublks,
+ .get = ublk_get_max_unprivileged_ublks,
};
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
+ &unprivileged_ublks_max, 0644);
+MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_DESCRIPTION("Userspace block device");
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6130854b6658..1636f636fbef 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -595,8 +595,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
/* This is a debug event that comes from IML and OP image when it
 * starts execution. There is no need to pass this event to the stack.
*/
- if (skb->data[2] == 0x97)
+ if (skb->data[2] == 0x97) {
+ hci_recv_diag(hdev, skb);
return 0;
+ }
}
return hci_recv_frame(hdev, skb);
@@ -612,7 +614,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
u8 pkt_type;
u16 plen;
u32 pcie_pkt_type;
- struct sk_buff *new_skb;
void *pdata;
struct hci_dev *hdev = data->hdev;
@@ -689,24 +690,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
- new_skb = bt_skb_alloc(plen, GFP_ATOMIC);
- if (!new_skb) {
- bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u",
- skb->len);
- ret = -ENOMEM;
- goto exit_error;
- }
-
- hci_skb_pkt_type(new_skb) = pkt_type;
- skb_put_data(new_skb, skb->data, plen);
+ hci_skb_pkt_type(skb) = pkt_type;
hdev->stat.byte_rx += plen;
+ skb_trim(skb, plen);
if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
- ret = btintel_pcie_recv_event(hdev, new_skb);
+ ret = btintel_pcie_recv_event(hdev, skb);
else
- ret = hci_recv_frame(hdev, new_skb);
+ ret = hci_recv_frame(hdev, skb);
+ skb = NULL; /* skb is freed in the callee */
exit_error:
+ if (skb)
+ kfree_skb(skb);
+
if (ret)
hdev->stat.err_rx++;
@@ -720,16 +717,10 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data *data = container_of(work,
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
- int err;
- struct hci_dev *hdev = data->hdev;
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
- err = btintel_pcie_recv_frame(data, skb);
- if (err)
- bt_dev_err(hdev, "Failed to send received frame: %d",
- err);
- kfree_skb(skb);
+ btintel_pcie_recv_frame(data, skb);
}
}
@@ -782,10 +773,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
/* Check CR_TIA and CR_HIA for change */
- if (cr_tia == cr_hia) {
- bt_dev_warn(hdev, "RXQ: no new CD found");
+ if (cr_tia == cr_hia)
return;
- }
rxq = &data->rxq;
@@ -821,6 +810,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
+}
+
+static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
+{
+ return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
+}
+
static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
{
struct msix_entry *entry = dev_id;
@@ -848,12 +847,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_gp0_handler(data);
/* For TX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
btintel_pcie_msix_tx_handle(data);
+ if (!btintel_pcie_is_rxq_empty(data))
+ btintel_pcie_msix_rx_handle(data);
+ }
/* For RX */
- if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1)
+ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
btintel_pcie_msix_rx_handle(data);
+ if (!btintel_pcie_is_txackq_empty(data))
+ btintel_pcie_msix_tx_handle(data);
+ }
/*
* Before sending the interrupt the HW disables it to prevent a nested
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 68846c5bd4f7..4390fd571dbd 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1330,13 +1330,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
break;
case 0x7922:
case 0x7925:
- /* Reset the device to ensure it's in the initial state before
- * downloading the firmware to ensure.
- */
-
- if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
- btmtk_usb_subsys_reset(hdev, dev_id);
- fallthrough;
case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
@@ -1345,12 +1338,9 @@ int btmtk_usb_setup(struct hci_dev *hdev)
btmtk_usb_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
return err;
}
- set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
-
/* It's Device EndPoint Reset Option Register */
err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
MTK_EP_RST_IN_OUT_OPT);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index bd5464bde174..1d26207b2ba7 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ if (bdev->func->irq_handler)
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
@@ -722,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ return 0;
+
sdio_claim_host(bdev->func);
/* Disable interrupt */
@@ -1442,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func)
if (!bdev)
return;
+ hdev = bdev->hdev;
+
+ /* Make sure to call btmtksdio_close before removing sdio card */
+ if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
+ btmtksdio_close(hdev);
+
 /* Be consistent with the state in btmtksdio_probe */
pm_runtime_get_noresume(bdev->dev);
- hdev = bdev->hdev;
-
sdio_set_drvdata(func, NULL);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index bfd769f2026b..b15f3ed767c5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -3010,55 +3010,27 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
}
-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- int ret = 1;
+ int ret = 0;
+ unsigned int skip = 0;
u8 pkt_type;
- u8 *sk_ptr;
- unsigned int sk_len;
u16 seqno;
u32 dump_size;
- struct hci_event_hdr *event_hdr;
- struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
pkt_type = hci_skb_pkt_type(skb);
- sk_ptr = skb->data;
- sk_len = skb->len;
-
- if (pkt_type == HCI_ACLDATA_PKT) {
- acl_hdr = hci_acl_hdr(skb);
- if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
- return 0;
- sk_ptr += HCI_ACL_HDR_SIZE;
- sk_len -= HCI_ACL_HDR_SIZE;
- event_hdr = (struct hci_event_hdr *)sk_ptr;
- } else {
- event_hdr = hci_event_hdr(skb);
- }
-
- if ((event_hdr->evt != HCI_VENDOR_PKT)
- || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
- return 0;
-
- sk_ptr += HCI_EVENT_HDR_SIZE;
- sk_len -= HCI_EVENT_HDR_SIZE;
+ skip = sizeof(struct hci_event_hdr);
+ if (pkt_type == HCI_ACLDATA_PKT)
+ skip += sizeof(struct hci_acl_hdr);
- dump_hdr = (struct qca_dump_hdr *)sk_ptr;
- if ((sk_len < offsetof(struct qca_dump_hdr, data))
- || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
- || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
- return 0;
+ skb_pull(skb, skip);
+ dump_hdr = (struct qca_dump_hdr *)skb->data;
- /*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3078,16 +3050,15 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
btdata->qca_dump.ram_dump_size = dump_size;
btdata->qca_dump.ram_dump_seqno = 0;
- sk_ptr += offsetof(struct qca_dump_hdr, data0);
- sk_len -= offsetof(struct qca_dump_hdr, data0);
+
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data0));
usb_disable_autosuspend(udev);
bt_dev_info(hdev, "%s memdump size(%u)\n",
(pkt_type == HCI_ACLDATA_PKT) ? "ACL" : "event",
dump_size);
} else {
- sk_ptr += offsetof(struct qca_dump_hdr, data);
- sk_len -= offsetof(struct qca_dump_hdr, data);
+ skb_pull(skb, offsetof(struct qca_dump_hdr, data));
}
if (!btdata->qca_dump.ram_dump_size) {
@@ -3107,7 +3078,6 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
return ret;
}
- skb_pull(skb, skb->len - sk_len);
hci_devcd_append(hdev, skb);
btdata->qca_dump.ram_dump_seqno++;
if (seqno == QCA_LAST_SEQUENCE_NUM) {
@@ -3132,17 +3102,74 @@ out:
return ret;
}
+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct hci_acl_hdr *acl_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ acl_hdr = skb_pull_data(clone, sizeof(*acl_hdr));
+ if (!acl_hdr || (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE))
+ goto out;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_event_hdr *event_hdr;
+ struct qca_dump_hdr *dump_hdr;
+ struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ bool is_dump = false;
+
+ if (!clone)
+ return false;
+
+ event_hdr = skb_pull_data(clone, sizeof(*event_hdr));
+ if (!event_hdr || (event_hdr->evt != HCI_VENDOR_PKT))
+ goto out;
+
+ dump_hdr = skb_pull_data(clone, sizeof(*dump_hdr));
+ if (!dump_hdr || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+ (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+ goto out;
+
+ is_dump = true;
+out:
+ consume_skb(clone);
+ return is_dump;
+}
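Both checkers use the same peek-without-consume idiom: clone the skb, pull typed headers off the clone, and free the clone, leaving the original skb intact for the normal receive path. The generic shape (a sketch):

static bool skb_has_vendor_evt_hdr(struct sk_buff *skb)
{
	struct hci_event_hdr *hdr;
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
	bool ok = false;

	if (!clone)
		return false;

	/* skb_pull_data() returns NULL when fewer than sizeof(*hdr)
	 * bytes remain, so short packets fail safely */
	hdr = skb_pull_data(clone, sizeof(*hdr));
	if (hdr && hdr->evt == HCI_VENDOR_PKT)
		ok = true;

	consume_skb(clone);
	return ok;
}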
+
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (acl_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
- if (handle_dump_pkt_qca(hdev, skb))
- return 0;
+ if (evt_pkt_is_dump_qca(hdev, skb))
+ return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index f2558506a02c..044433567959 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -2415,14 +2415,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(qcadev->bt_en) &&
- (data->soc_type == QCA_WCN6750 ||
- data->soc_type == QCA_WCN6855)) {
- dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- return PTR_ERR(qcadev->bt_en);
- }
+ if (IS_ERR(qcadev->bt_en))
+ return dev_err_probe(&serdev->dev,
+ PTR_ERR(qcadev->bt_en),
+ "failed to acquire BT_EN gpio\n");
- if (!qcadev->bt_en)
+ if (!qcadev->bt_en &&
+ (data->soc_type == QCA_WCN6750 ||
+ data->soc_type == QCA_WCN6855))
power_ctrl_enabled = false;
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index f7dd455dd0dd..dda466f9181a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -315,7 +315,7 @@ static int __init misc_init(void)
goto fail_remove;
err = -EIO;
- if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
+ if (__register_chrdev(MISC_MAJOR, 0, MINORMASK + 1, "misc", &misc_fops))
goto fail_printk;
return 0;
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
index e49a19fea3bd..dc882fc9fa9e 100644
--- a/drivers/char/tpm/tpm-buf.c
+++ b/drivers/char/tpm/tpm-buf.c
@@ -201,7 +201,7 @@ static void tpm_buf_read(struct tpm_buf *buf, off_t *offset, size_t count, void
*/
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset)
{
- u8 value;
+ u8 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u8);
*/
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset)
{
- u16 value;
+ u16 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
@@ -235,7 +235,7 @@ EXPORT_SYMBOL_GPL(tpm_buf_read_u16);
*/
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset)
{
- u32 value;
+ u32 value = 0;
tpm_buf_read(buf, offset, sizeof(value), &value);
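The zero-initializers matter because tpm_buf_read() can return without writing the output on a boundary error, which would otherwise leak uninitialized stack data to the caller. A sketch of that failure mode (behavior assumed from context):

static void tpm_buf_read_sketch(struct tpm_buf *buf, off_t *offset,
				size_t count, void *output)
{
	off_t next_offset = *offset + count;

	if (next_offset > buf->length) {
		buf->flags |= TPM_BUF_BOUNDARY_ERROR;
		return;		/* *output left untouched, hence "= 0" */
	}

	memcpy(output, &buf->data[*offset], count);
	*offset = next_offset;
}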
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index b70165b588ec..7b5049b3d476 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -40,11 +40,6 @@
*
* These are the usage functions:
*
- * tpm2_start_auth_session() which allocates the opaque auth structure
- * and gets a session from the TPM. This must be called before
- * any of the following functions. The session is protected by a
- * session_key which is derived from a random salt value
- * encrypted to the NULL seed.
* tpm2_end_auth_session() kills the session and frees the resources.
* Under normal operation this function is done by
* tpm_buf_check_hmac_response(), so this is only to be used on
@@ -963,16 +958,13 @@ err:
}
/**
- * tpm2_start_auth_session() - create a HMAC authentication session with the TPM
- * @chip: the TPM chip structure to create the session with
+ * tpm2_start_auth_session() - Create an HMAC authentication session
+ * @chip: A TPM chip
*
- * This function loads the NULL seed from its saved context and starts
- * an authentication session on the null seed, fills in the
- * @chip->auth structure to contain all the session details necessary
- * for performing the HMAC, encrypt and decrypt operations and
- * returns. The NULL seed is flushed before this function returns.
+ * Loads the ephemeral key (null seed), and starts an HMAC authenticated
+ * session. The null seed is flushed before the return.
*
- * Return: zero on success or actual error encountered.
+ * Returns zero on success, or a POSIX error code.
*/
int tpm2_start_auth_session(struct tpm_chip *chip)
{
@@ -982,7 +974,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
int rc;
if (chip->auth) {
- dev_warn_once(&chip->dev, "auth session is active\n");
+ dev_dbg_once(&chip->dev, "auth session is active\n");
return 0;
}
@@ -1024,7 +1016,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
/* hash algorithm for session */
tpm_buf_append_u16(&buf, TPM_ALG_SHA256);
- rc = tpm_transmit_cmd(chip, &buf, 0, "start auth session");
+ rc = tpm_ret_to_err(tpm_transmit_cmd(chip, &buf, 0, "StartAuthSession"));
tpm2_flush_context(chip, null_key);
if (rc == TPM2_RC_SUCCESS)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 970d02c337c7..6c3aa480396b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,7 +54,7 @@ enum tis_int_flags {
enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_LONG_TIMEOUT = 4000, /* 4 secs */
TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 18f92dd44d45..fc698e2b1da1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1579,8 +1579,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 rows;
+ __virtio16 cols;
} size;
if (!is_console_port(port))
@@ -1588,7 +1588,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
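The type change matters because __virtio16 fields are device-endian: legacy devices may use a big-endian transport order, while VIRTIO 1.0+ devices are always little-endian, and virtio16_to_cpu() picks the right conversion per device. A minimal sketch of the corrected access pattern:

struct console_size {		/* mirrors the local struct above */
	__virtio16 rows;
	__virtio16 cols;
};

static void read_console_size(struct virtio_device *vdev,
			      const struct console_size *s,
			      u16 *rows, u16 *cols)
{
	/* byte-swaps on legacy big-endian transports, plain
	 * le16-to-cpu on modern devices */
	*rows = virtio16_to_cpu(vdev, s->rows);
	*cols = virtio16_to_cpu(vdev, s->cols);
}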
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 014db6386624..8ddf3a9a53df 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -137,6 +137,8 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ clk_data->num = S2MPS11_CLKS_NUM;
+
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@@ -186,7 +188,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->num = S2MPS11_CLKS_NUM;
of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
clk_data);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index cf7720b9172f..50faafbf5dda 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -5258,6 +5258,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
+ /* Check if node in clkspec is in disabled/fail state */
+ if (!of_device_is_available(clkspec->np))
+ return ERR_PTR(-ENOENT);
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np) {
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index fb18f507f121..fe6dac70f1a1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/units.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -406,11 +407,151 @@ static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_
static struct clk_hw **hws;
static struct clk_hw_onecell_data *clk_hw_data;
+struct imx8mp_clock_constraints {
+ unsigned int clkid;
+ u32 maxrate;
+};
+
+/*
+ * The tables below are taken from IMX8MPCEC Rev. 2.1, 07/2023,
+ * Table 13 ("Maximum frequency of modules").
+ * Probable datasheet typos that were corrected are marked with a comment.
+ */
+static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = {
+ { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */
+ { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */
+ { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AHB, 133333333 },
+ { IMX8MP_CLK_IPG_ROOT, 66666667 },
+ { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ },
+ { IMX8MP_CLK_DRAM_ALT, 666666667 },
+ { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ },
+ { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ },
+ { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ },
+ { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ },
+ { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */
+ { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */
+ { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = {
+ { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ},
+ { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ },
+ { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ },
+ { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */
+ { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ },
+ { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ },
+ { /* Sentinel */ }
+};
+
+static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[])
+{
+ const struct imx8mp_clock_constraints *constr;
+
+ for (constr = constraints; constr->clkid; constr++)
+ clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate);
+}
+
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
void __iomem *anatop_base, *ccm_base;
+ const char *opmode;
int err;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
@@ -715,6 +856,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
imx_check_clk_hws(hws, IMX8MP_CLK_END);
+ imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints);
+
+ err = of_property_read_string(np, "fsl,operating-mode", &opmode);
+ if (!err) {
+ if (!strcmp(opmode, "nominal"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints);
+ else if (!strcmp(opmode, "overdrive"))
+ imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints);
+ }
+
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
if (err < 0) {
dev_err(dev, "failed to register hws for i.MX8MP\n");
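
The constraint walk relies on a zero-filled sentinel entry rather than an element count: iteration stops at the first clkid of 0, so the three tables can be passed around as bare pointers. A minimal sketch of the same table shape under hypothetical clock IDs:

	#include <linux/clk.h>
	#include <linux/clk-provider.h>
	#include <linux/units.h>

	struct demo_clock_constraints {
		unsigned int clkid;
		u32 maxrate;
	};

	/* Hypothetical IDs; the zero-filled sentinel terminates the walk */
	static const struct demo_clock_constraints demo_constraints[] = {
		{ 1 /* DEMO_CLK_UART */, 80 * HZ_PER_MHZ },
		{ 2 /* DEMO_CLK_AXI  */, 400 * HZ_PER_MHZ },
		{ /* Sentinel */ }
	};

	static void demo_apply_constraints(struct clk_hw **hws,
					   const struct demo_clock_constraints *c)
	{
		for (; c->clkid; c++)
			clk_hw_set_rate_range(hws[c->clkid], 0, c->maxrate);
	}
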
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 69bbf62ba3cd..d470ed007854 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -217,7 +217,7 @@ config IPQ_GCC_4019
config IPQ_GCC_5018
tristate "IPQ5018 Global Clock Controller"
- depends on ARM64 || COMPILE_TEST
+ depends on ARM || ARM64 || COMPILE_TEST
help
Support for global clock controller on ipq5018 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 34d2f17520dc..450ddbebd35f 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
.parent_data = cam_cc_parent_data_2,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
.parent_data = cam_cc_parent_data_3,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
.parent_data = cam_cc_parent_data_4,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = {
.parent_data = cam_cc_parent_data_1,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
.parent_data = cam_cc_parent_data_0,
.num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 9a65d14acf71..cec0afea8e44 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -709,14 +709,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low))
+ return 0;
if (alpha_width > 32) {
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
- &high);
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
+ &high))
+ return 0;
a = (u64)high << 32 | low;
} else {
a = low & GENMASK(alpha_width - 1, 0);
@@ -942,8 +947,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
@@ -1137,8 +1145,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1196,7 +1207,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
- regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
+ if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl))
+ return 0;
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK(pll);
@@ -1412,8 +1424,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, frac, alpha_width = pll_alpha_width(pll);
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
- regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
+
+ if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
@@ -1563,7 +1578,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct regmap *regmap = pll->clkr.regmap;
u32 i, div = 1, val;
- regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (regmap_read(regmap, PLL_USER_CTL(pll), &val))
+ return 0;
val >>= pll->post_div_shift;
val &= PLL_POST_DIV_MASK(pll);
@@ -2484,9 +2500,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
struct regmap *regmap = pll->clkr.regmap;
u32 l, frac;
- regmap_read(regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(regmap, PLL_L_VAL(pll), &l))
+ return 0;
l &= LUCID_EVO_PLL_L_VAL_MASK;
- regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
+
+ if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac))
+ return 0;
return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
}
@@ -2699,7 +2718,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
- regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
+ if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l))
+ return 0;
return parent_rate * l;
}
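
Every hunk in this file follows one rule: if regmap_read() fails, report a rate of 0 rather than computing from an uninitialized or stale register value; the clk core treats 0 from .recalc_rate as "rate unknown". A hedged sketch of the shape (the register offset and helper are hypothetical):

	#include <linux/regmap.h>

	#define DEMO_PLL_L_VAL	0x04	/* hypothetical register offset */

	static unsigned long demo_pll_recalc_rate(struct regmap *regmap,
						  unsigned long parent_rate)
	{
		u32 l;

		/* regmap_read() returns 0 on success, a negative errno on failure */
		if (regmap_read(regmap, DEMO_PLL_L_VAL, &l))
			return 0;	/* rate unknown; don't trust l */

		return parent_rate * l;
	}
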
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 45e726477086..22169da08a51 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
@@ -713,14 +714,24 @@ static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
+static const struct regmap_config lpass_audio_cc_sc7280_reset_regmap_config = {
+ .name = "lpassaudio_cc_reset",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = 0xc8,
+};
+
static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
- .config = &lpass_audio_cc_sc7280_regmap_config,
+ .config = &lpass_audio_cc_sc7280_reset_regmap_config,
.resets = lpass_audio_cc_sc7280_resets,
.num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
};
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
- { .compatible = "qcom,sc7280-lpassaudiocc" },
+ { .compatible = "qcom,qcm6490-lpassaudiocc", .data = &lpass_audio_cc_reset_sc7280_desc },
+ { .compatible = "qcom,sc7280-lpassaudiocc", .data = &lpass_audio_cc_sc7280_desc },
{ }
};
MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table);
@@ -752,13 +763,17 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
+ desc = device_get_match_data(&pdev->dev);
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcm6490-lpassaudiocc"))
+ return qcom_cc_probe_by_index(pdev, 1, desc);
+
ret = lpass_audio_setup_runtime_pm(pdev);
if (ret)
return ret;
lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc";
lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000;
- desc = &lpass_audio_cc_sc7280_desc;
regmap = qcom_cc_map(pdev, desc);
if (IS_ERR(regmap)) {
@@ -772,7 +787,7 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
regmap_write(regmap, 0x4, 0x3b);
regmap_write(regmap, 0x8, 0xff05);
- ret = qcom_cc_really_probe(&pdev->dev, &lpass_audio_cc_sc7280_desc, regmap);
+ ret = qcom_cc_really_probe(&pdev->dev, desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n");
goto exit;
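
The probe rework above keys everything off per-compatible match data: device_get_match_data() returns the .data pointer of whichever of_device_id matched, so new variants only need a table entry. A minimal sketch of the pattern (the compatibles and struct are hypothetical):

	#include <linux/mod_devicetable.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct demo_cc_desc {
		int variant;
	};

	static const struct demo_cc_desc demo_desc_a = { .variant = 1 };
	static const struct demo_cc_desc demo_desc_b = { .variant = 2 };

	static const struct of_device_id demo_match_table[] = {
		{ .compatible = "vendor,chip-a", .data = &demo_desc_a },
		{ .compatible = "vendor,chip-b", .data = &demo_desc_b },
		{ }
	};

	static int demo_probe(struct platform_device *pdev)
	{
		const struct demo_cc_desc *desc = device_get_match_data(&pdev->dev);

		if (!desc)
			return -ENODEV;

		dev_info(&pdev->dev, "variant %d\n", desc->variant);
		return 0;
	}
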
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 4bd8862dc82b..91928db411dc 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -1549,28 +1549,6 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
-static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
- const struct of_phandle_args *clkspec)
-{
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int id;
- unsigned int i;
-
- if (clkspec->args_count != 2)
- return false;
-
- if (clkspec->args[0] != CPG_MOD)
- return false;
-
- id = clkspec->args[1] + info->num_total_core_clks;
- for (i = 0; i < info->num_no_pm_mod_clks; i++) {
- if (info->no_pm_mod_clks[i] == id)
- return false;
- }
-
- return true;
-}
-
/**
* struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
* @onecell_data: cell data
@@ -1595,45 +1573,73 @@ struct rzg2l_cpg_pd {
u16 id;
};
+static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_pd *pd,
+ const struct of_phandle_args *clkspec)
+{
+ if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+ return false;
+
+ switch (clkspec->args[0]) {
+ case CPG_MOD: {
+ struct rzg2l_cpg_priv *priv = pd->priv;
+ const struct rzg2l_cpg_info *info = priv->info;
+ unsigned int id = clkspec->args[1];
+
+ if (id >= priv->num_mod_clks)
+ return false;
+
+ id += info->num_total_core_clks;
+
+ for (unsigned int i = 0; i < info->num_no_pm_mod_clks; i++) {
+ if (info->no_pm_mod_clks[i] == id)
+ return false;
+ }
+
+ return true;
+ }
+
+ case CPG_CORE:
+ default:
+ return false;
+ }
+}
+
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
- struct rzg2l_cpg_priv *priv = pd->priv;
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
bool once = true;
struct clk *clk;
+ unsigned int i;
int error;
- int i = 0;
-
- while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
- &clkspec)) {
- if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
- if (once) {
- once = false;
- error = pm_clk_create(dev);
- if (error) {
- of_node_put(clkspec.np);
- goto err;
- }
- }
- clk = of_clk_get_from_provider(&clkspec);
+
+ for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+ if (!rzg2l_cpg_is_pm_clk(pd, &clkspec)) {
of_node_put(clkspec.np);
- if (IS_ERR(clk)) {
- error = PTR_ERR(clk);
- goto fail_destroy;
- }
+ continue;
+ }
- error = pm_clk_add_clk(dev, clk);
+ if (once) {
+ once = false;
+ error = pm_clk_create(dev);
if (error) {
- dev_err(dev, "pm_clk_add_clk failed %d\n",
- error);
- goto fail_put;
+ of_node_put(clkspec.np);
+ goto err;
}
- } else {
- of_node_put(clkspec.np);
}
- i++;
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ if (IS_ERR(clk)) {
+ error = PTR_ERR(clk);
+ goto fail_destroy;
+ }
+
+ error = pm_clk_add_clk(dev, clk);
+ if (error) {
+ dev_err(dev, "pm_clk_add_clk failed %d\n", error);
+ goto fail_put;
+ }
}
return 0;
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index a4c1e92e1fd7..4e81a0bae022 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -447,8 +447,7 @@ static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -469,8 +468,7 @@ static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
{
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -630,8 +628,7 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
} else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) {
unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data);
u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data);
- unsigned int index = (mstop_index - 1) * 16;
- atomic_t *mstop = &priv->mstop_count[index];
+ atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
unsigned long flags;
unsigned int i;
u32 val = 0;
@@ -926,6 +923,9 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
if (!priv->mstop_count)
return -ENOMEM;
+ /* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
+ priv->mstop_count -= 16;
+
priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
info->num_resets, GFP_KERNEL);
if (!priv->resets)
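
Instead of computing (mstop_index - 1) * 16 at all three call sites, the patch biases the counter array base once at allocation so the hardware's 1-based register index m addresses the array directly. A hedged sketch of the idiom (names and sizes are illustrative; the biased pointer must be un-biased again before it is freed):

	#include <linux/atomic.h>
	#include <linux/slab.h>

	#define DEMO_BITS_PER_MSTOP	16

	/* Allocate counters for registers m = 1..nregs, pre-biased for m >= 1 */
	static atomic_t *demo_alloc_mstop_counts(unsigned int nregs)
	{
		atomic_t *counts;

		counts = kcalloc(nregs * DEMO_BITS_PER_MSTOP, sizeof(*counts),
				 GFP_KERNEL);
		if (!counts)
			return NULL;

		/* Now counts[m * 16 + bit] is valid for m starting at 1 */
		return counts - DEMO_BITS_PER_MSTOP;
	}

To release, the caller would kfree(counts + DEMO_BITS_PER_MSTOP); the real driver avoids that wrinkle by letting devm free the original, unbiased allocation.
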
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index 2a8bfd5d9abc..24f26e254c29 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -1393,7 +1393,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = {
/* List of parent clocks for Muxes in CMU_HSI1 */
PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"};
PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" };
-PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" };
+PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" };
PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" };
static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index bb66c906ebbb..e83d4fd40240 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = {
{ .hw = &pll_periph0_2x_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
-
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0",
+ mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1",
+ mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static const struct clk_parent_data mmc2_parents[] = {
{ .fw_name = "hosc" },
@@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = {
{ .hw = &pll_periph0_800M_clk.common.hw },
{ .hw = &pll_audio1_div2_clk.common.hw },
};
-static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
- 0, 4, /* M */
- 8, 2, /* P */
- 24, 3, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents,
+ 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 2, /* post-div */
+ 0);
static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
0x84c, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 190816c35da9..6050cbfa922e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -328,10 +328,16 @@ static SUNXI_CCU_M_WITH_MUX_GATE(gpu0_clk, "gpu0", gpu0_parents, 0x670,
24, 1, /* mux */
BIT(31), /* gate */
CLK_SET_RATE_PARENT);
+
+/*
+ * This clk is needed as a temporary fallback during GPU PLL frequency changes.
+ * Set the CLK_IS_CRITICAL flag to prevent it from being disabled.
+ */
+#define SUN50I_H616_GPU_CLK1_REG 0x674
static SUNXI_CCU_M_WITH_GATE(gpu1_clk, "gpu1", "pll-periph0-2x", 0x674,
0, 2, /* M */
BIT(31),/* gate */
- 0);
+ CLK_IS_CRITICAL);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
0x67c, BIT(0), 0);
@@ -1120,6 +1126,19 @@ static struct ccu_pll_nb sun50i_h616_pll_cpu_nb = {
.lock = BIT(28),
};
+static struct ccu_mux_nb sun50i_h616_gpu_nb = {
+ .common = &gpu0_clk.common,
+ .cm = &gpu0_clk.mux,
+ .delay_us = 1, /* manual doesn't really say */
+ .bypass_index = 1, /* GPU_CLK1@400MHz */
+};
+
+static struct ccu_pll_nb sun50i_h616_pll_gpu_nb = {
+ .common = &pll_gpu_clk.common,
+ .enable = BIT(29), /* LOCK_ENABLE */
+ .lock = BIT(28),
+};
+
static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
@@ -1171,6 +1190,14 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
writel(val, reg + SUN50I_H616_PLL_AUDIO_REG);
/*
+ * Set the input-divider for the gpu1 clock to 3, to reach a safe 400 MHz.
+ */
+ val = readl(reg + SUN50I_H616_GPU_CLK1_REG);
+ val &= ~GENMASK(1, 0);
+ val |= 2;
+ writel(val, reg + SUN50I_H616_GPU_CLK1_REG);
+
+ /*
* First clock parent (osc32K) is unusable for CEC. But since there
* is no good way to force parent switch (both run with same frequency),
* just set second clock parent here.
@@ -1190,6 +1217,13 @@ static int sun50i_h616_ccu_probe(struct platform_device *pdev)
/* Re-lock the CPU PLL after any rate changes */
ccu_pll_notifier_register(&sun50i_h616_pll_cpu_nb);
+ /* Reparent GPU during GPU PLL rate changes */
+ ccu_mux_notifier_register(pll_gpu_clk.common.hw.clk,
+ &sun50i_h616_gpu_nb);
+
+ /* Re-lock the GPU PLL after any rate changes */
+ ccu_pll_notifier_register(&sun50i_h616_pll_gpu_nb);
+
return 0;
}
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 6e50f3728fb5..7d836a9fb3db 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -52,6 +52,28 @@ struct ccu_mp {
} \
}
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \
+ _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _postdiv, _flags)\
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
_mshift, _mwidth, \
_pshift, _pwidth, \
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 39f7c2d736d1..b603c25f3dfa 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
- raw_spin_lock(&i8253_lock);
+ guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
-
- raw_spin_unlock(&i8253_lock);
}
static int pit_shutdown(struct clock_event_device *evt)
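
The i8253 conversion swaps an open-coded lock/unlock pair for a guard() from <linux/cleanup.h>, which drops the lock automatically at end of scope and, in this case, also upgrades to the irqsave variant. A minimal sketch of the idiom on hypothetical state:

	#include <linux/cleanup.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);
	static int demo_state;		/* hypothetical shared state */

	static void demo_disable(void)
	{
		/* Lock with IRQs disabled; released automatically at scope exit */
		guard(raw_spinlock_irqsave)(&demo_lock);

		demo_state = 0;
		/* any early return from here on still drops the lock */
	}
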
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 7907b740497a..abb685a080a5 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -115,6 +115,9 @@ static void gic_update_frequency(void *data)
static int gic_starting_cpu(unsigned int cpu)
{
+ /* Ensure the GIC counter is running */
+ clear_gic_config(GIC_CONFIG_COUNTSTOP);
+
gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
return 0;
}
@@ -288,9 +291,6 @@ static int __init gic_clocksource_of_init(struct device_node *node)
pr_warn("Unable to register clock notifier\n");
}
- /* And finally start the counter */
- clear_gic_config(GIC_CONFIG_COUNTSTOP);
-
/*
* It's safe to use the MIPS GIC timer as a sched clock source only if
* its ticks are stable, which is true on either the platforms with
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 48ce50c5f5e6..4d7cf338824a 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -126,7 +126,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
static int riscv_timer_dying_cpu(unsigned int cpu)
{
+ /*
+	 * Stop the timer when the CPU is about to go offline; otherwise
+	 * the timer interrupt may be left pending during power-down.
+ */
+ riscv_clock_event_stop();
disable_percpu_irq(riscv_clock_event_irq);
+
return 0;
}
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index 951c23fa0369..75dce1ff2419 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -758,7 +758,7 @@ static void jr3_pci_detach(struct comedi_device *dev)
struct jr3_pci_dev_private *devpriv = dev->private;
if (devpriv)
- del_timer_sync(&devpriv->timer);
+ timer_shutdown_sync(&devpriv->timer);
comedi_pci_detach(dev);
}
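
timer_shutdown_sync() is the stronger teardown primitive: like del_timer_sync() it waits for a running callback to finish, but it additionally marks the timer shut down so later mod_timer() calls cannot re-arm it, which is exactly what a detach path wants. A hedged sketch (the private struct is hypothetical):

	#include <linux/timer.h>

	struct demo_priv {
		struct timer_list timer;
	};

	static void demo_detach(struct demo_priv *devpriv)
	{
		if (devpriv)
			/* Wait out a running callback and forbid re-arming */
			timer_shutdown_sync(&devpriv->timer);
	}
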
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f9cb943d945..0d46402e3094 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,7 +76,7 @@ config ARM_VEXPRESS_SPC_CPUFREQ
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on (ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ) || COMPILE_TEST
- default y
+ default y if ARCH_BRCMSTB && !ARM_SCMI_CPUFREQ
help
Some Broadcom STB SoCs use a co-processor running proprietary firmware
("AVS") to handle voltage and frequency scaling. This driver provides
@@ -88,7 +88,7 @@ config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK || COMPILE_TEST
depends on CPUFREQ_DT && REGULATOR && PL320_MBOX
- default m
+ default m if ARCH_HIGHBANK
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
@@ -133,7 +133,7 @@ config ARM_MEDIATEK_CPUFREQ
config ARM_MEDIATEK_CPUFREQ_HW
tristate "MediaTek CPUFreq HW driver"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default m
+ default m if ARCH_MEDIATEK
help
Support for the CPUFreq HW driver.
Some MediaTek chipsets have a HW engine to offload the steps
@@ -181,7 +181,7 @@ config ARM_RASPBERRYPI_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410 || COMPILE_TEST
- default y
+ default CPU_S3C6410
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -190,7 +190,7 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210 || COMPILE_TEST
- default y
+ default CPU_S5PV210
help
This adds the CPUFreq driver for Samsung S5PV210 and
S5PC110 SoCs.
@@ -214,7 +214,7 @@ config ARM_SCMI_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR || COMPILE_TEST
- default y
+ default PLAT_SPEAR
help
This adds the CPUFreq driver support for SPEAr SOCs.
@@ -233,7 +233,7 @@ config ARM_TEGRA20_CPUFREQ
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
@@ -241,7 +241,7 @@ config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default y
+ default ARCH_TEGRA
help
This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -256,14 +256,14 @@ config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
- default y
+ default ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
help
This adds CPU frequency driver support for Tegra194 SOCs.
config ARM_TI_CPUFREQ
bool "Texas Instruments CPUFreq support"
depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
- default y
+ default ARCH_OMAP2PLUS || ARCH_K3
help
This driver enables valid OPPs on the running platform based on
values contained within the SoC in use. Enable this in order to
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 463b69a2dff5..5f5586a579d5 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq)
nominal_perf = perf_caps.nominal_perf;
if (nominal_freq)
- *nominal_freq = perf_caps.nominal_freq;
+ *nominal_freq = perf_caps.nominal_freq * 1000;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -909,6 +909,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost) {
+ if (policy->boost_supported) {
+ /*
+ * The firmware may have altered boost state while the
+ * CPU was offline (for example during a suspend-resume
+ * cycle).
+ */
+ if (policy->boost_enabled != boost_state(cpu))
+ set_boost(policy, policy->boost_enabled);
+ } else {
+ policy->boost_supported = true;
+ }
+ }
+
return result;
err_unreg:
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1b26845703f6..a27749d948b4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -746,7 +746,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
refresh_frequency_limits(policy);
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 269b18c62d04..82007f6a24d2 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -134,11 +134,17 @@ static const struct of_device_id apple_soc_cpufreq_of_match[] __maybe_unused = {
static unsigned int apple_soc_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct apple_cpu_priv *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct apple_cpu_priv *priv;
struct cpufreq_frequency_table *p;
unsigned int pstate;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
if (priv->info->cur_pstate_mask) {
u32 reg = readl_relaxed(priv->reg_base + APPLE_DVFS_STATUS);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8f512448382f..ba7c16c0e475 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -749,7 +749,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
int ret;
if (!policy)
- return -ENODEV;
+ return 0;
cpu_data = policy->driver_data;
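
The apple, cppc, scmi, and scpi .get() fixes in this series all share one shape: cpufreq_cpu_get_raw() may return NULL while a policy is being torn down, and because .get() returns an unsigned int kHz rate, "unknown" must be reported as 0, never as a negative errno (which would be misread as a huge frequency). A hedged sketch:

	#include <linux/cpufreq.h>

	struct demo_cpu_priv {
		unsigned int rate_khz;	/* hypothetical cached rate */
	};

	static unsigned int demo_cpufreq_get_rate(unsigned int cpu)
	{
		struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
		struct demo_cpu_priv *priv;

		/* Policy can vanish during hotplug; 0 means "rate unknown" */
		if (unlikely(!policy))
			return 0;

		priv = policy->driver_data;
		return priv->rate_khz;
	}
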
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2aa00769cf09..a010da0f6337 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -175,6 +175,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm8350", },
{ .compatible = "qcom,sm8450", },
{ .compatible = "qcom,sm8550", },
+ { .compatible = "qcom,sm8650", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 934e0e19824c..61cbfb56bf4e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,16 +535,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
- unsigned int target_freq, unsigned int relation)
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ unsigned int relation)
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
- idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+ idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -564,7 +566,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2337,7 +2353,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
- target_freq = __resolve_freq(policy, target_freq, relation);
+ target_freq = __resolve_freq(policy, target_freq, policy->min,
+ policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2661,11 +2678,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. It ensures
* frequency resolution will neither overshoot the requested maximum
* nor undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min and
+ * compiler optimizations around them because they may be accessed
+ * concurrently by cpufreq_driver_resolve_freq() during the update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+ new_data.min, new_data.max,
+ CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+ new_data.max, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);
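
The reader/writer pairing above is worth spelling out: cpufreq_driver_resolve_freq() takes one READ_ONCE() snapshot of each bound, and if it races with cpufreq_set_policy() mid-update and sees min > max, the max wins, since the max is the safety-relevant limit (thermal throttling). A hedged sketch of the reader side on a stripped-down policy struct:

	#include <linux/compiler.h>
	#include <linux/minmax.h>

	struct demo_policy {
		unsigned int min;
		unsigned int max;	/* updated concurrently via WRITE_ONCE() */
	};

	static unsigned int demo_resolve(const struct demo_policy *p,
					 unsigned int target)
	{
		/* One snapshot each; a concurrent writer may slip in between */
		unsigned int min = READ_ONCE(p->min);
		unsigned int max = READ_ONCE(p->max);

		/* Inconsistent snapshot: always honor the max */
		if (unlikely(min > max))
			min = max;

		return clamp_val(target, min, max);
	}
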
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a7c38b8b3e78..0e65d37c9231 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
- index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+ policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e80d912b8d..9db21ffc1197 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- if ((freq < policy->min) || (freq > policy->max))
+ if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;
@@ -367,6 +367,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
if (ret)
return ret;
+	/* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9c4cc01fd51a..43e847e9f741 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -598,6 +598,9 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
+ if (!cpu_feature_enabled(X86_FEATURE_IDA))
+ return true;
+
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 914bf2c940a0..9c6eb1238f1b 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -37,11 +37,17 @@ static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scmi_data *priv = policy->driver_data;
+ struct cpufreq_policy *policy;
+ struct scmi_data *priv;
unsigned long rate;
int ret;
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+
ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
if (ret)
return 0;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 1f97b949763f..9118856e1736 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops;
static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
- struct scpi_data *priv = policy->driver_data;
- unsigned long rate = clk_get_rate(priv->clk);
+ struct cpufreq_policy *policy;
+ struct scpi_data *priv;
+ unsigned long rate;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(!policy))
+ return 0;
+
+ priv = policy->driver_data;
+ rate = clk_get_rate(priv->clk);
return rate / 1000;
}
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 47d6840b3489..744312a44279 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -194,7 +194,9 @@ static int sun50i_cpufreq_get_efuse(void)
struct nvmem_cell *speedbin_nvmem;
const struct of_device_id *match;
struct device *cpu_dev;
- u32 *speedbin;
+ void *speedbin_ptr;
+ u32 speedbin = 0;
+ size_t len;
int ret;
cpu_dev = get_cpu_device(0);
@@ -217,14 +219,18 @@ static int sun50i_cpufreq_get_efuse(void)
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
"Could not get nvmem cell\n");
- speedbin = nvmem_cell_read(speedbin_nvmem, NULL);
+ speedbin_ptr = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
- if (IS_ERR(speedbin))
- return PTR_ERR(speedbin);
+ if (IS_ERR(speedbin_ptr))
+ return PTR_ERR(speedbin_ptr);
- ret = opp_data->efuse_xlate(*speedbin);
+ if (len <= 4)
+ memcpy(&speedbin, speedbin_ptr, len);
+ speedbin = le32_to_cpu(speedbin);
- kfree(speedbin);
+ ret = opp_data->efuse_xlate(speedbin);
+
+ kfree(speedbin_ptr);
return ret;
};
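
The speedbin fix stops assuming the NVMEM cell is exactly 4 bytes wide: it asks nvmem_cell_read() for the real length, copies at most sizeof(u32) into a zero-initialized value, and only then byte-swaps. A hedged sketch of the same defensive read (the helper is hypothetical):

	#include <linux/kernel.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int demo_read_speedbin(struct nvmem_cell *cell, u32 *out)
	{
		size_t len;
		void *buf;
		u32 val = 0;

		buf = nvmem_cell_read(cell, &len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		/* Cells can be 1..4 bytes; never read past the u32 */
		if (len <= sizeof(val))
			memcpy(&val, buf, len);

		*out = le32_to_cpu(val);	/* fuse is little-endian, as in the patch */
		kfree(buf);
		return 0;
	}
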
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index c7761eb99f3c..92aa50f01666 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+	/* Set the same policy for all CPUs in a cluster */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
return 0;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 28363bfa3e4c..42b77d820d0f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -192,8 +192,19 @@ again:
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
- if ((divisor * 4) <= INTERVALS * 3)
+ if (divisor * 4 <= INTERVALS * 3) {
+ /*
+ * If there are sufficiently many data points still under
+ * consideration after the outliers have been eliminated,
+ * returning without a prediction would be a mistake because it
+ * is likely that the next interval will not exceed the current
+ * maximum, so return the latter in that case.
+ */
+ if (divisor >= INTERVALS / 2)
+ return max;
+
return UINT_MAX;
+ }
thresh = max - 1;
goto again;
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 75bebec2c757..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+	 * According to a review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 157f9a9ed636..2ebc878da160 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -532,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
/* Last entry must be zero */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 5387c68f3c9d..426244107037 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs,
break;
}
- dev_err(&pdev->dev,
- "Request failed with software error code 0x%x\n",
- cpt_status->s.uc_compcode);
+ pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n",
+ cpt_status->s.uc_compcode,
+ info->req->areq->tfm->__crt_alg->cra_name,
+ info->req->areq->tfm->__crt_alg->cra_driver_name);
otx2_cpt_dump_sg_list(pdev, info->req);
break;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d94a26c3541a..133ebc998236 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
MXS_DCP_CONTROL0_INTERRUPT |
MXS_DCP_CONTROL0_ENABLE_CIPHER;
- if (key_referenced)
- /* Set OTP key bit to select the key via KEY_SELECT. */
- desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
- else
+ if (!key_referenced)
/* Payload contains the key. */
desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
+ else if (actx->key[0] == DCP_PAES_KEY_OTP)
+ /* Set OTP key bit to select the key via KEY_SELECT. */
+ desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
if (rctx->enc)
desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 117c2e94c761..5ca7b0eed568 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -581,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
resource_size_t rcrb = ri->base;
void __iomem *addr;
u32 bar0, bar1;
- u16 cmd;
u32 id;
if (which == CXL_RCRB_UPSTREAM)
@@ -603,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
}
id = readl(addr + PCI_VENDOR_ID);
- cmd = readw(addr + PCI_COMMAND);
bar0 = readl(addr + PCI_BASE_ADDRESS_0);
bar1 = readl(addr + PCI_BASE_ADDRESS_1);
iounmap(addr);
@@ -618,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri
dev_err(dev, "Failed to access Downstream Port RCRB\n");
return CXL_RESOURCE_NONE;
}
- if (!(cmd & PCI_COMMAND_MEMORY))
- return CXL_RESOURCE_NONE;
/* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */
if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO))
return CXL_RESOURCE_NONE;
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5f8d010516f0..b1ef4546346d 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
- /* pointer update must be visible before we extend the num_fences */
- smp_store_mb(fobj->num_fences, count);
+ /* fence update must be visible before we extend the num_fences */
+ smp_wmb();
+ fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);
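
The barrier swap above is the classic lockless publish pattern: the writes that fill the new array slot must be ordered before the store that enlarges num_fences, so a reader that observes the new count also observes the fence pointer. smp_store_mb() put a full barrier after the count store, which is the wrong side. A hedged sketch on a toy structure (readers would pair this with a matching smp_rmb()):

	#include <asm/barrier.h>

	struct demo_list {
		void *slots[16];
		unsigned int num;	/* lockless readers trust slots[0..num-1] */
	};

	static void demo_publish(struct demo_list *l, void *item)
	{
		l->slots[l->num] = item;

		/* Slot write must be visible before the extended count */
		smp_wmb();
		l->num = l->num + 1;
	}
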
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cc7398cc17d6..e74e36a8ecda 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -393,7 +393,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
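
The udmabuf cast matters because size_limit_mb * 1024 * 1024 is evaluated in 32-bit int arithmetic, which overflows once the configured limit reaches 2048 MB; widening one operand first makes the whole product 64-bit. A minimal demonstration of the fixed expression:

	#include <linux/types.h>

	static pgoff_t demo_pglimit(int size_limit_mb, unsigned int page_shift)
	{
		/* Cast before multiplying; the int product overflows at 2048 MB */
		return ((u64)size_limit_mb * 1024 * 1024) >> page_shift;
	}
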
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 20b10c15c696..0117bb2e8591 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -893,7 +893,7 @@ static int bcm2835_dma_suspend_late(struct device *dev)
}
static const struct dev_pm_ops bcm2835_dma_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(bcm2835_dma_suspend_late, NULL)
};
static int bcm2835_dma_probe(struct platform_device *pdev)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index e3e0e88a76d3..619403c6e517 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -57,7 +57,7 @@ static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
intr = edma_readl_chreg(fsl_chan, ch_int);
if (!intr)
- return IRQ_HANDLED;
+ return IRQ_NONE;
edma_writel_chreg(fsl_chan, 1, ch_int);
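
Returning IRQ_NONE when the channel-interrupt register reads zero tells the IRQ core that this device did not raise the line; that keeps spurious-interrupt accounting and shared-line dispatch honest, where IRQ_HANDLED would silently claim interrupts that belong to someone else. A hedged sketch (the register layout is hypothetical):

	#include <linux/interrupt.h>
	#include <linux/io.h>

	/* Hypothetical shared-line handler; dev_id carries the MMIO base */
	static irqreturn_t demo_tx_handler(int irq, void *dev_id)
	{
		void __iomem *ch_int = dev_id;
		u32 intr = readl(ch_int);

		if (!intr)
			return IRQ_NONE;	/* not ours; let the core account it */

		writel(intr, ch_int);		/* write-1-to-clear acknowledge */
		return IRQ_HANDLED;
	}
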
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ff94ee892339..6d12033649f8 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -222,7 +222,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev, *fdev;
int rc = 0;
- struct iommu_sva *sva;
+ struct iommu_sva *sva = NULL;
unsigned int pasid;
struct idxd_cdev *idxd_cdev;
@@ -317,7 +317,7 @@ failed_set_pasid:
if (device_user_pasid_enabled(idxd))
idxd_xa_pasid_remove(ctx);
failed_get_pasid:
- if (device_user_pasid_enabled(idxd))
+ if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
iommu_sva_unbind_device(sva);
failed:
mutex_unlock(&wq->wq_lock);
@@ -407,6 +407,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
rc = check_vma(wq, vma, __func__);
if (rc < 0)
return rc;
@@ -473,6 +476,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t
ssize_t written = 0;
int i;
+ if (current->mm != ctx->mm)
+ return -EPERM;
+
for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
int rc = idxd_submit_user_descriptor(ctx, udesc + i);
@@ -493,6 +499,9 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_device *idxd = wq->idxd;
__poll_t out = 0;
+ if (current->mm != ctx->mm)
+ return POLLNVAL;
+
poll_wait(filp, &wq->err_queue, wait);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index b946f78f85e1..b908942fac73 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -155,6 +155,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd)
pci_free_irq_vectors(pdev);
}
+static void idxd_clean_wqs(struct idxd_device *idxd)
+{
+ struct idxd_wq *wq;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
+ conf_dev = wq_confdev(wq);
+ put_device(conf_dev);
+ kfree(wq);
+ }
+ bitmap_free(idxd->wq_enable_map);
+ kfree(idxd->wqs);
+}
+
static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -169,8 +188,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
- kfree(idxd->wqs);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_bitmap;
}
for (i = 0; i < idxd->max_wqs; i++) {
@@ -189,10 +208,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
- if (rc < 0) {
- put_device(conf_dev);
+ if (rc < 0)
goto err;
- }
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -203,7 +220,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
- put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
@@ -211,9 +227,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
- put_device(conf_dev);
rc = -ENOMEM;
- goto err;
+ goto err_opcap_bmap;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -224,15 +239,46 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
- err:
+err_opcap_bmap:
+ kfree(wq->wqcfg);
+
+err:
+ put_device(conf_dev);
+ kfree(wq);
+
while (--i >= 0) {
wq = idxd->wqs[i];
+ if (idxd->hw.wq_cap.op_config)
+ bitmap_free(wq->opcap_bmap);
+ kfree(wq->wqcfg);
conf_dev = wq_confdev(wq);
put_device(conf_dev);
+ kfree(wq);
+
}
+ bitmap_free(idxd->wq_enable_map);
+
+err_bitmap:
+ kfree(idxd->wqs);
+
return rc;
}
+static void idxd_clean_engines(struct idxd_device *idxd)
+{
+ struct idxd_engine *engine;
+ struct device *conf_dev;
+ int i;
+
+ for (i = 0; i < idxd->max_engines; i++) {
+ engine = idxd->engines[i];
+ conf_dev = engine_confdev(engine);
+ put_device(conf_dev);
+ kfree(engine);
+ }
+ kfree(idxd->engines);
+}
+
static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
@@ -263,6 +309,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(engine);
goto err;
}
@@ -276,10 +323,26 @@ static int idxd_setup_engines(struct idxd_device *idxd)
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
+ kfree(engine);
}
+ kfree(idxd->engines);
+
return rc;
}
+static void idxd_clean_groups(struct idxd_device *idxd)
+{
+ struct idxd_group *group;
+ int i;
+
+ for (i = 0; i < idxd->max_groups; i++) {
+ group = idxd->groups[i];
+ put_device(group_confdev(group));
+ kfree(group);
+ }
+ kfree(idxd->groups);
+}
+
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -310,6 +373,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(conf_dev);
+ kfree(group);
goto err;
}
@@ -334,20 +398,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
+ kfree(group);
}
+ kfree(idxd->groups);
+
return rc;
}
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
- int i;
-
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_groups(idxd);
+ idxd_clean_engines(idxd);
+ idxd_clean_wqs(idxd);
destroy_workqueue(idxd->wq);
}
@@ -390,7 +452,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
static int idxd_setup_internals(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
- int rc, i;
+ int rc;
init_waitqueue_head(&idxd->cmd_waitq);
@@ -421,14 +483,11 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_evl:
destroy_workqueue(idxd->wq);
err_wkq_create:
- for (i = 0; i < idxd->max_groups; i++)
- put_device(group_confdev(idxd->groups[i]));
+ idxd_clean_groups(idxd);
err_group:
- for (i = 0; i < idxd->max_engines; i++)
- put_device(engine_confdev(idxd->engines[i]));
+ idxd_clean_engines(idxd);
err_engine:
- for (i = 0; i < idxd->max_wqs; i++)
- put_device(wq_confdev(idxd->wqs[i]));
+ idxd_clean_wqs(idxd);
err_wqs:
return rc;
}
@@ -528,6 +587,17 @@ static void idxd_read_caps(struct idxd_device *idxd)
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}
+static void idxd_free(struct idxd_device *idxd)
+{
+ if (!idxd)
+ return;
+
+ put_device(idxd_confdev(idxd));
+ bitmap_free(idxd->opcap_bmap);
+ ida_free(&idxd_ida, idxd->id);
+ kfree(idxd);
+}
+
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
@@ -545,28 +615,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
- return NULL;
+ goto err_ida;
idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
- if (!idxd->opcap_bmap) {
- ida_free(&idxd_ida, idxd->id);
- return NULL;
- }
+ if (!idxd->opcap_bmap)
+ goto err_opcap;
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
- if (rc < 0) {
- put_device(conf_dev);
- return NULL;
- }
+ if (rc < 0)
+ goto err_name;
spin_lock_init(&idxd->dev_lock);
spin_lock_init(&idxd->cmd_lock);
return idxd;
+
+err_name:
+ put_device(conf_dev);
+ bitmap_free(idxd->opcap_bmap);
+err_opcap:
+ ida_free(&idxd_ida, idxd->id);
+err_ida:
+ kfree(idxd);
+
+ return NULL;
}
static int idxd_enable_system_pasid(struct idxd_device *idxd)
@@ -1191,7 +1267,7 @@ int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
- put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
@@ -1233,7 +1309,6 @@ static void idxd_shutdown(struct pci_dev *pdev)
static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- struct idxd_irq_entry *irq_entry;
idxd_unregister_devices(idxd);
/*
@@ -1246,20 +1321,12 @@ static void idxd_remove(struct pci_dev *pdev)
get_device(idxd_confdev(idxd));
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
- if (device_pasid_enabled(idxd))
- idxd_disable_system_pasid(idxd);
idxd_device_remove_debugfs(idxd);
-
- irq_entry = idxd_get_ie(idxd, 0);
- free_irq(irq_entry->vector, irq_entry);
- pci_free_irq_vectors(pdev);
+ idxd_cleanup(idxd);
pci_iounmap(pdev, idxd->reg_base);
- if (device_user_pasid_enabled(idxd))
- idxd_disable_sva(pdev);
- pci_disable_device(pdev);
- destroy_workqueue(idxd->wq);
- perfmon_pmu_remove(idxd);
put_device(idxd_confdev(idxd));
+ idxd_free(idxd);
+ pci_disable_device(pdev);
}
static struct pci_driver idxd_pci_driver = {
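The reworked idxd error paths follow a common pattern: the failing iteration frees only what it partially built, then the earlier, fully constructed items are unwound in reverse. A generic sketch of that goto-unwind structure, with made-up resource names:

#include <stdlib.h>

struct item { void *cfg; };

static int setup_items(struct item **items, int n)
{
    int i, rc;

    for (i = 0; i < n; i++) {
        items[i] = calloc(1, sizeof(*items[i]));
        if (!items[i]) {
            rc = -1;
            goto err;
        }
        items[i]->cfg = calloc(1, 64);
        if (!items[i]->cfg) {
            rc = -1;
            goto err_cfg;
        }
    }
    return 0;

err_cfg:
    free(items[i]);          /* partial construction of item i */
err:
    while (--i >= 0) {       /* fully constructed items, in reverse */
        free(items[i]->cfg);
        free(items[i]);
    }
    return rc;
}

int main(void)
{
    struct item *items[4];
    return setup_items(items, 4) ? 1 : 0;    /* demo only; leaks on success */
}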
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 7c224c3ab7a0..f87d244cc2d6 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
struct k3_udma_glue_rx_flow *flows;
u32 flow_num;
u32 flows_ready;
+ bool single_fdq; /* one FDQ for all flows */
};
static void k3_udma_chan_dev_release(struct device *dev)
@@ -970,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ep_cfg = rx_chn->common.ep_config;
- if (xudma_is_pktdma(rx_chn->common.udmax))
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
- else
+ rx_chn->single_fdq = false;
+ } else {
rx_chn->udma_rchan_id = -1;
+ rx_chn->single_fdq = true;
+ }
/* request and cfg UDMAP RX channel */
rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1103,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
+ rx_chn->single_fdq = false;
+ } else {
+ rx_chn->single_fdq = true;
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
@@ -1453,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
{
struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
struct device *dev = rx_chn->common.dev;
@@ -1465,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
- if (skip_fdq)
+ if (rx_chn->single_fdq && flow_num)
goto do_reset;
/*
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 7ed1956b4642..d1b96f3d908f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
+ unsigned long flags;
while (1) {
+ spin_lock_irqsave(&uc->vc.lock, flags);
+
if (uc->desc) {
/* Get previous residue and time stamp */
residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
+
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
break;
}
+
+ spin_unlock_irqrestore(&uc->vc.lock, flags);
}
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct udma_dev *ud = ofdma->of_dma_data;
- dma_cap_mask_t mask = ud->ddev.cap_mask;
struct udma_filter_param filter_param;
struct dma_chan *chan;
@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
}
- chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+ chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
if (!chan) {
dev_err(ud->dev, "get channel fail in %s.\n", __func__);
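The k3-udma hunk takes uc->vc.lock around the descriptor checks but deliberately drops it across usleep_range(), since sleeping under a spinlock is forbidden. A userspace analog of that lock/drop/sleep loop, with a mutex standing in for the spinlock:

#include <pthread.h>
#include <unistd.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool draining = true;             /* models the tx_drain state */

static void check_completion(void)
{
    for (;;) {
        pthread_mutex_lock(&lock);
        if (!draining) {                 /* state settled: done */
            pthread_mutex_unlock(&lock);
            break;
        }
        draining = false;                /* pretend progress was made */
        pthread_mutex_unlock(&lock);     /* drop before sleeping */
        usleep(100);                     /* ~usleep_range() stand-in */
    }
}

int main(void)
{
    check_completion();
    return 0;
}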
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 1877201d1aa9..20bdc52f63a5 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
+ if (WARN_ON(src->freq_supported && !src->freq_supported_num))
+ return -EINVAL;
+
memcpy(dst, src, sizeof(*dst));
- if (src->freq_supported && src->freq_supported_num) {
+ if (src->freq_supported) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
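The dpll change hoists the invariant check (a frequency table implies a non-zero count) ahead of the copy, so the duplication below can test only the pointer. A compact sketch of validating up front under assumed field names:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct props {
    const unsigned long *freq;
    size_t freq_num;
};

static int prop_dup(const struct props *src, struct props *dst)
{
    if (src->freq && !src->freq_num)
        return -1;                       /* reject inconsistent input */

    *dst = *src;
    if (src->freq) {                     /* count is now trustworthy */
        unsigned long *copy = malloc(src->freq_num * sizeof(*copy));
        if (!copy)
            return -1;
        memcpy(copy, src->freq, src->freq_num * sizeof(*copy));
        dst->freq = copy;
    }
    return 0;
}

int main(void)
{
    unsigned long f[] = { 1000, 2000 };
    struct props src = { f, 2 }, dst;
    printf("dup: %d\n", prop_dup(&src, &dst));
    return 0;
}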
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3e971f902363..dcd7008fe06b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
- if (priv->ecc_uecnt_offset)
+ if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
- /* Interrupt mode set to every SBERR */
- regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
- ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
+ /* Set irq mask for DDR SBE to avoid any pending irq before registration */
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+ (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
+
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 3727e72c8c2e..7248d24c4908 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
+#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 9b02a6b43ab5..a8dd55ec52ce 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -408,10 +408,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
int i, j, ret;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
+ u32 addr_decode[IE31200_CHANNELS], mad_offset;
/*
* Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
@@ -469,19 +468,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mad_offset = IE31200_MAD_DIMM_0_OFFSET;
}
- /* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
+ addr_decode[i] = readl(window + mad_offset +
(i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]);
}
/*
@@ -492,14 +482,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
*/
for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
for (j = 0; j < IE31200_CHANNELS; j++) {
+ struct dimm_data dimm_info;
struct dimm_info *dimm;
unsigned long nr_pages;
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
+ populate_dimm_info(&dimm_info, addr_decode[j], i,
+ skl);
+ edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
+ dimm_info.size,
+ dimm_info.dual_rank,
+ dimm_info.x16_width);
+
+ nr_pages = IE31200_PAGES(dimm_info.size, skl);
if (nr_pages == 0)
continue;
- if (dimm_info[j][i].dual_rank) {
+ if (dimm_info.dual_rank) {
nr_pages = nr_pages / 2;
dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
dimm->nr_pages = nr_pages;
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index fa09a82b4492..eacde92802f5 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -213,6 +213,7 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 655672a88095..d545d2513205 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -150,6 +150,14 @@ static int ffa_version_check(u32 *version)
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -280,7 +288,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
buf_sz);
- ffa_rx_release();
+ if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+ ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);
@@ -1449,6 +1458,10 @@ static int ffa_setup_partitions(void)
kfree(pbuf);
+ /* Check if the host is already added as part of partition info */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
+
/* Allocate for the host */
ret = ffa_xa_add_partition_info(drv_info->vm_id);
if (ret)
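The new FF-A check rejects firmware whose major version is newer than the driver's before applying the existing minimum floor, since a newer major may change the ABI. A sketch of that version gate; the 16/16 bit packing and macro names are assumptions, not the FF-A definitions:

#include <stdio.h>
#include <stdint.h>

#define MAKE_VER(maj, min)   (((uint32_t)(maj) << 16) | (min))
#define MAJOR(v)             ((v) >> 16)
#define MINOR(v)             ((v) & 0xffff)

#define DRIVER_VERSION  MAKE_VER(1, 1)
#define MIN_VERSION     MAKE_VER(1, 0)

static int version_check(uint32_t fw)
{
    if (MAJOR(fw) > MAJOR(DRIVER_VERSION))
        return -1;                       /* too new: ABI may differ */
    if (fw < MIN_VERSION)
        return -1;                       /* too old */
    return 0;
}

int main(void)
{
    printf("v2.0 -> %d\n", version_check(MAKE_VER(2, 0)));  /* rejected */
    printf("v1.0 -> %d\n", version_check(MAKE_VER(1, 0)));  /* accepted */
    return 0;
}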
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index a3386bf36de5..5d799f662696 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -42,7 +42,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper let an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT already exist for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +52,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +66,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+ * Find the matching protocol rdev list and then search it for any
+ * existing device with the same name...fails if a duplicate is found.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -260,6 +253,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
if (!dev)
return NULL;
+ /* Drop the refcnt bumped implicitly by device_find_child */
+ put_device(dev);
+
return to_scmi_dev(dev);
}
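device_find_child() returns with a reference held; since this helper only hands back a borrowed pointer, the fix drops that implicit reference immediately. A generic refcount sketch of the find/put pattern (names are illustrative, not the driver core API):

#include <stdio.h>

struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o) { o->refcnt++; return o; }
static void obj_put(struct obj *o)        { o->refcnt--; }

static struct obj *find_child(struct obj *o)
{
    return obj_get(o);                   /* find helpers bump the count */
}

int main(void)
{
    struct obj dev = { .refcnt = 1 };
    struct obj *child = find_child(&dev);

    obj_put(child);                      /* drop the implicit reference */
    printf("refcnt back to %d\n", dev.refcnt);
    return 0;
}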
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 1c75a4c9c371..0390d5ff195e 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1248,7 +1248,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph,
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
- struct scmi_xfer *xfer, ktime_t stop)
+ struct scmi_xfer *xfer, ktime_t stop,
+ bool *ooo)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
@@ -1257,7 +1258,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
- try_wait_for_completion(&xfer->done) ||
+ (*ooo = try_wait_for_completion(&xfer->done)) ||
ktime_after(ktime_get(), stop);
}
@@ -1274,15 +1275,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
+ bool ooo = false;
+
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
- spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
- xfer, stop));
- if (ktime_after(ktime_get(), stop)) {
+ spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
+ stop, &ooo));
+ if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
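Because || short-circuits, the poll loop can exit via the completion without poll_done() ever being true; the new ooo out-parameter latches which condition actually fired, so the timeout branch no longer misreports a late completion as a timeout. A small sketch of latching the exit reason:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static bool poll_done(void)      { return false; }
static bool try_completion(void) { return true; }   /* completed late */

static bool done_no_timeout(time_t stop, bool *ooo)
{
    return poll_done() ||
           (*ooo = try_completion()) ||   /* latch the real reason */
           time(NULL) > stop;
}

int main(void)
{
    bool ooo = false;
    time_t stop = time(NULL) + 1;

    while (!done_no_timeout(stop, &ooo))
        ;
    if (!ooo && !poll_done())
        printf("timed out\n");
    else
        printf("completed (out-of-order reply is fine)\n");
    return 0;
}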
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 0a883091259a..e3c2e38b746d 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -6,14 +6,11 @@ config FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST_UTILS
tristate
- depends on KUNIT && REGMAP
- select FW_CS_DSP
config FW_CS_DSP_KUNIT_TEST
tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
- depends on KUNIT && REGMAP
+ depends on KUNIT && REGMAP && FW_CS_DSP
default KUNIT_ALL_TESTS
- select FW_CS_DSP
select FW_CS_DSP_KUNIT_TEST_UTILS
help
This builds KUnit tests for cs_dsp.
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 3c52cb73237a..e3f990d888d7 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1224,22 +1224,28 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_dev;
+ goto err_unregister_rsu_dev;
}
+ ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
+ if (ret)
+ goto err_unregister_fcs_dev;
+
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_dev:
+err_unregister_fcs_dev:
+ platform_device_unregister(svc->intel_svc_fcs);
+err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
@@ -1253,6 +1259,8 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ of_platform_depopulate(ctrl->dev);
+
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 720fa8b5d8e9..7356e860e65c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 6b0914432445..5af0bd33890c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -52,7 +52,7 @@
/* V2 Defines */
#define VSE_CVP_TX_CREDITS 0x49 /* 8bit */
-#define V2_CREDIT_TIMEOUT_US 20000
+#define V2_CREDIT_TIMEOUT_US 40000
#define V2_CHECK_CREDIT_US 10
#define V2_POLL_TIMEOUT_US 1000000
#define V2_USER_TIMEOUT_US 500000
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d63c1030e6ac..6121ee6beaa3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1203,6 +1203,8 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
guard(mutex)(&chip->i2c_lock);
+ if (chip->client->irq > 0)
+ enable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(chip);
@@ -1215,6 +1217,10 @@ static int pca953x_restore_context(struct pca953x_chip *chip)
static void pca953x_save_context(struct pca953x_chip *chip)
{
guard(mutex)(&chip->i2c_lock);
+
+ /* Disable IRQ to prevent early triggering while regmap "cache only" is on */
+ if (chip->client->irq > 0)
+ disable_irq(chip->client->irq);
regcache_cache_only(chip->regmap, true);
}
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index e89f299f2140..dcecb7a25911 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -400,10 +400,15 @@ static ssize_t gpio_virtuser_direction_do_write(struct file *file,
char buf[32], *trimmed;
int ret, dir, val = 0;
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret < 0)
return ret;
+ buf[ret] = '\0';
+
trimmed = strim(buf);
if (strcmp(trimmed, "input") == 0) {
@@ -622,12 +627,15 @@ static ssize_t gpio_virtuser_consumer_write(struct file *file,
char buf[GPIO_VIRTUSER_NAME_BUF_LEN + 2];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, GPIO_VIRTUSER_NAME_BUF_LEN, ppos,
user_buf, count);
if (ret < 0)
return ret;
- buf[strlen(buf) - 1] = '\0';
+ buf[ret] = '\0';
ret = gpiod_set_consumer_name(data->ad.desc, buf);
if (ret)
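Both gpio-virtuser hunks fix the same class of bug: the buffer must reserve a byte for the terminator and be terminated at the number of bytes actually copied, rather than running strlen() on possibly unterminated data. A sketch of the fixed copy-and-terminate pattern:

#include <stdio.h>
#include <string.h>

static long copy_input(char *dst, size_t dstsz, const char *src, size_t count)
{
    if (count >= dstsz)
        return -1;                       /* -EINVAL analog */
    memcpy(dst, src, count);             /* simple_write_to_buffer analog */
    dst[count] = '\0';                   /* terminate at bytes copied */
    return (long)count;
}

int main(void)
{
    char buf[32];
    const char input[] = "input\n";      /* no NUL arrives from user */
    long ret = copy_input(buf, sizeof(buf), input, sizeof(input) - 1);

    if (ret >= 0)
        printf("got %ld bytes: '%s'\n", ret, buf);
    return 0;
}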
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 176e9142fd8f..56f13e4fa361 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -259,6 +259,9 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "fsl,imx8qm-fec", "phy-reset-gpios", "phy-reset-active-high" },
{ "fsl,s32v234-fec", "phy-reset-gpios", "phy-reset-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
#if IS_ENABLED(CONFIG_PCI_IMX6)
{ "fsl,imx6q-pcie", "reset-gpio", "reset-gpio-active-high" },
{ "fsl,imx6sx-pcie", "reset-gpio", "reset-gpio-active-high" },
@@ -285,9 +288,6 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
-#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
- { "atmel,hsmci", "cd-gpios", "cd-inverted" },
-#endif
};
unsigned int i;
bool active_high;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0c00ed2ab431..960ca0ad45fc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2577,6 +2577,9 @@ int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
return -ENOTSUPP;
ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ if (ret > 0)
+ ret = -EBADE;
+
#ifdef CONFIG_GPIO_CDEV
/*
* Special case - if we're setting debounce period, we need to store
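The gpiolib hunk hardens against misbehaving drivers: set_config() is expected to return 0 or a negative errno, so any positive value is coerced to -EBADE before callers test it. A minimal sketch of normalizing a callback's return value:

#include <stdio.h>

#define EBADE 52                         /* invalid exchange */

static int buggy_set_config(void) { return 3; }  /* driver bug */

static int do_set_config(void)
{
    int ret = buggy_set_config();
    if (ret > 0)
        ret = -EBADE;                    /* never leak a bogus positive */
    return ret;
}

int main(void)
{
    printf("ret = %d\n", do_set_config());   /* -52, a real errno */
    return 0;
}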
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index fbef3f471bd0..bd228dc77e99 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -188,7 +188,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
select STACKDEPOT
- depends on DRM_KMS_HELPER
+ select DRM_KMS_HELPER
depends on DEBUG_KERNEL
depends on EXPERT
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 69895fccb474..90f688b3d9d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -352,7 +352,6 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -1119,6 +1118,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1187,9 +1187,15 @@ struct amdgpu_device {
bool debug_enable_ras_aca;
bool debug_exp_resets;
- bool enforce_isolation[MAX_XCP];
- /* Added this mutex for cleaner shader isolation between GFX and compute processes */
+ /* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
+ bool enforce_isolation[MAX_XCP];
+ struct amdgpu_isolation {
+ void *owner;
+ struct dma_fence *spearhead;
+ struct amdgpu_sync active;
+ struct amdgpu_sync prev;
+ } isolation[MAX_XCP];
struct amdgpu_init_level *init_lvl;
};
@@ -1470,6 +1476,9 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
@@ -1593,11 +1602,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
-static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index b8d4e07d2043..bebfbc1497d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
-/**
- * amdgpu_choose_low_power_state
- *
- * @adev: amdgpu_device_pointer
- *
- * Choose the target low power state for the GPU
- */
-void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
-{
- if (adev->in_runpm)
- return;
-
- if (amdgpu_acpi_is_s0ix_active(adev))
- adev->in_s0ix = true;
- else if (amdgpu_acpi_is_s3_active(adev))
- adev->in_s3 = true;
-}
-
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8af67f18500a..55d539967695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -47,6 +47,7 @@ enum TLB_FLUSH_TYPE {
};
struct amdgpu_device;
+struct kfd_process_device;
struct amdgpu_reset_context;
enum kfd_mem_attachment_type {
@@ -192,7 +193,7 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
@@ -212,9 +213,8 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
}
static inline
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- return 0;
}
static inline
@@ -299,14 +299,10 @@ bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
(&((struct amdgpu_fpriv *) \
((struct drm_file *)(drm_priv))->driver_priv)->vm)
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint8_t xcp_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1e998f972c30..b3c8eae46042 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -370,40 +370,32 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO to remove the eviction fences from.
+ *
+ * This function should only be used on release when all references to the BO
+ * are already dropped. We remove the eviction fence from the private copy of
+ * the dma_resv object here since that is what is used during release to
+ * determine if the BO is idle or not.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *root = bo;
- struct amdgpu_vm_bo_base *vm_bo;
- struct amdgpu_vm *vm;
- struct amdkfd_process_info *info;
- struct amdgpu_amdkfd_fence *ef;
- int ret;
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
- /* we can always get vm_bo from root PD bo.*/
- while (root->parent)
- root = root->parent;
+ dma_resv_assert_held(resv);
- vm_bo = root->vm_bo;
- if (!vm_bo)
- return 0;
-
- vm = vm_bo->vm;
- if (!vm)
- return 0;
-
- info = vm->process_info;
- if (!info || !info->eviction_fence)
- return 0;
-
- ef = container_of(dma_fence_get(&info->eviction_fence->base),
- struct amdgpu_amdkfd_fence, base);
-
- BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
- dma_resv_unlock(bo->tbo.base.resv);
+ stub = dma_fence_get_stub();
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
- dma_fence_put(&ef->base);
- return ret;
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
@@ -499,7 +491,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return amdgpu_sync_fence(sync, vm->last_update);
+ return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
@@ -1263,7 +1255,7 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- (void)amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
return 0;
}
@@ -1287,7 +1279,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
}
- return amdgpu_sync_fence(sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
@@ -1529,27 +1521,6 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
amdgpu_bo_unreserve(bo);
}
-int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct amdgpu_vm *avm, u32 pasid)
-
-{
- int ret;
-
- /* Free the original amdgpu allocated pasid,
- * will be replaced with kfd allocated pasid.
- */
- if (avm->pasid) {
- amdgpu_pasid_free(avm->pasid);
- amdgpu_vm_set_pasid(adev, avm, 0);
- }
-
- ret = amdgpu_vm_set_pasid(adev, avm, pasid);
- if (ret)
- return ret;
-
- return 0;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
@@ -1607,27 +1578,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
- void *drm_priv)
-{
- struct amdgpu_vm *avm;
-
- if (WARN_ON(!adev || !drm_priv))
- return;
-
- avm = drm_priv_to_vm(drm_priv);
-
- pr_debug("Releasing process vm %p\n", avm);
-
- /* The original pasid of amdgpu vm has already been
- * released during making a amdgpu vm to a compute vm
- * The current pasid is managed by kfd and will be
- * released on kfd process destroy. Set amdgpu pasid
- * to 0 to avoid duplicate release.
- */
- amdgpu_vm_release_compute(adev, avm);
-}
-
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -2969,7 +2919,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
- ret = amdgpu_sync_fence(&sync_obj, fence);
+ ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5cc5f59e3018..4a5b406601fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -428,7 +428,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
dma_fence_put(old);
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
@@ -450,7 +450,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
return r;
}
- r = amdgpu_sync_fence(&p->sync, fence);
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
dma_fence_put(fence);
return r;
}
@@ -1124,7 +1124,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
@@ -1135,7 +1136,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1154,7 +1156,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
@@ -1167,7 +1170,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
if (r)
return r;
@@ -1248,7 +1251,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
continue;
}
- r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
+ GFP_KERNEL);
dma_fence_put(fence);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index cfdf558b48b6..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -109,7 +109,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_exec exec;
int r;
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&exec, 0, 0);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 71e8a76180ad..28190b0ac5fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -224,6 +224,24 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
static DEVICE_ATTR(pcie_replay_count, 0444,
amdgpu_device_get_pcie_replay_count, NULL);
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev))
+ ret = sysfs_create_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev))
+ sysfs_remove_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+}
+
static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t ppos, size_t count)
@@ -4123,11 +4141,6 @@ static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
}
#endif
-static const struct attribute *amdgpu_dev_attributes[] = {
- &dev_attr_pcie_replay_count.attr,
- NULL
-};
-
static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
{
if (amdgpu_mcbp == 1)
@@ -4232,6 +4245,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.reset_sem_mutex);
/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
mutex_init(&adev->enforce_isolation_mutex);
+ for (i = 0; i < MAX_XCP; ++i) {
+ adev->isolation[i].spearhead = dma_fence_get_stub();
+ amdgpu_sync_create(&adev->isolation[i].active);
+ amdgpu_sync_create(&adev->isolation[i].prev);
+ }
mutex_init(&adev->gfx.kfd_sch_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4352,10 +4370,17 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
- /* Get rid of things like offb */
- r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
- if (r)
- return r;
+ /*
+ * No need to remove conflicting FBs for non-display class devices.
+ * This prevents the sysfb from being freed accidentally.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ /* Get rid of things like offb */
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+ if (r)
+ return r;
+ }
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4567,7 +4592,7 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ r = amdgpu_device_attr_sysfs_init(adev);
if (r)
dev_err(adev->dev, "Could not create amdgpu device attr\n");
@@ -4704,7 +4729,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
- sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
+ amdgpu_device_attr_sysfs_fini(adev);
amdgpu_fru_sysfs_fini(adev);
amdgpu_reg_state_sysfs_fini(adev);
@@ -4731,7 +4756,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- int idx;
+ int i, idx;
bool px;
amdgpu_device_ip_fini(adev);
@@ -4739,6 +4764,11 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+ for (i = 0; i < MAX_XCP; ++i) {
+ dma_fence_put(adev->isolation[i].spearhead);
+ amdgpu_sync_free(&adev->isolation[i].active);
+ amdgpu_sync_free(&adev->isolation[i].prev);
+ }
amdgpu_reset_fini(adev);
@@ -4755,6 +4785,9 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
kfree(adev->fru_info);
adev->fru_info = NULL;
+ kfree(adev->xcp_mgr);
+ adev->xcp_mgr = NULL;
+
px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!dev_is_removable(&adev->pdev->dev) &&
@@ -4819,28 +4852,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
* @data: data
*
* This function is called when the system is about to suspend or hibernate.
- * It is used to evict resources from the device before the system goes to
- * sleep while there is still access to swap.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- int r;
switch (mode) {
case PM_HIBERNATION_PREPARE:
adev->in_s4 = true;
- fallthrough;
- case PM_SUSPEND_PREPARE:
- r = amdgpu_device_evict_resources(adev);
- /*
- * This is considered non-fatal at this time because
- * amdgpu_device_prepare() will also fatally evict resources.
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781
- */
- if (r)
- drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r);
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
break;
}
@@ -4861,15 +4886,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
- amdgpu_choose_low_power_state(adev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- goto unprepare;
+ return r;
flush_delayed_work(&adev->gfx.gfx_off_delay_work);
@@ -4880,15 +4903,10 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
if (r)
- goto unprepare;
+ return r;
}
return 0;
-
-unprepare:
- adev->in_s0ix = adev->in_s3 = adev->in_s4 = false;
-
- return r;
}
/**
@@ -6875,6 +6893,92 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
return NULL;
}
+/**
+ * amdgpu_device_enforce_isolation - enforce HW isolation
+ * @adev: the amdgpu device pointer
+ * @ring: the HW ring the job is supposed to run on
+ * @job: the job which is about to be pushed to the HW ring
+ *
+ * Makes sure that only one client at a time can use the GFX block.
+ * Returns: The dependency to wait on before the job can be pushed to the HW.
+ * The function is called multiple times until NULL is returned.
+ */
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ struct drm_sched_fence *f = job->base.s_fence;
+ struct dma_fence *dep;
+ void *owner;
+ int r;
+
+ /*
+ * For now enforce isolation only for the GFX block since we only need
+ * the cleaner shader on those rings.
+ */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
+ ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return NULL;
+
+ /*
+ * All submissions where enforce isolation is false are handled as if
+ * they come from a single client. Use ~0l as the owner to distinguish it
+ * from kernel submissions where the owner is NULL.
+ */
+ owner = job->enforce_isolation ? f->owner : (void *)~0l;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+
+ /*
+ * The "spearhead" submission is the first one which changes the
+ * ownership to its client. We always need to wait for it to be
+ * pushed to the HW before proceeding with anything.
+ */
+ if (&f->scheduled != isolation->spearhead &&
+ !dma_fence_is_signaled(isolation->spearhead)) {
+ dep = isolation->spearhead;
+ goto out_grab_ref;
+ }
+
+ if (isolation->owner != owner) {
+
+ /*
+ * Wait for any gang to be assembled before switching to a
+ * different owner or otherwise we could deadlock the
+ * submissions.
+ */
+ if (!job->gang_submit) {
+ dep = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(dep))
+ goto out_return_dep;
+ dma_fence_put(dep);
+ }
+
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(&f->scheduled);
+ amdgpu_sync_move(&isolation->active, &isolation->prev);
+ isolation->owner = owner;
+ }
+
+ /*
+ * Specifying the ring here helps to pipeline submissions even when
+ * isolation is enabled. If that is not desired, NULL can be used instead
+ * of the ring for testing, to enforce a CPU round trip while switching
+ * between clients.
+ */
+ dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
+ r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
+ if (r)
+ DRM_WARN("OOM tracking isolation\n");
+
+out_grab_ref:
+ dma_fence_get(dep);
+out_return_dep:
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ return dep;
+}
+
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 949d74eff294..6a6dc15273dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -113,8 +113,7 @@
#include "amdgpu_isp.h"
#endif
-#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
-MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -297,21 +296,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
return ret;
}
-static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
- const char *fw_name;
int r;
- switch (amdgpu_discovery) {
- case 2:
- fw_name = FIRMWARE_IP_DISCOVERY;
- break;
- default:
- dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
- return -EINVAL;
- }
-
r = request_firmware(&fw, fw_name, adev->dev);
if (r) {
dev_err(adev->dev, "can't load firmware \"%s\"\n",
@@ -404,10 +395,19 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
return 0;
}
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+{
+ if (amdgpu_discovery == 2)
+ return "amdgpu/ip_discovery.bin";
+
+ return NULL;
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -419,9 +419,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return -ENOMEM;
/* Read from file if it is the preferred option */
- if (amdgpu_discovery == 2) {
+ fw_name = amdgpu_discovery_get_fw_name(adev);
+ if (fw_name != NULL) {
dev_info(adev->dev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index c9842a0e2a1c..cb043296f9ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -43,6 +43,29 @@
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
+
+/**
+ * dma_buf_attach_adev - Helper to get adev of an attachment
+ *
+ * @attach: attachment
+ *
+ * Returns:
+ * A struct amdgpu_device * if the attaching device is an amdgpu device or
+ * partition, NULL otherwise.
+ */
+static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
+{
+ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ return amdgpu_ttm_adev(bo->tbo.bdev);
+ }
+
+ return NULL;
+}
+
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@@ -54,11 +77,13 @@
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
{
+ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
+ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
amdgpu_vm_bo_update_shared(bo);
@@ -459,6 +484,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
struct drm_gem_object *obj = &bo->tbo.base;
struct drm_gem_object *gobj;
+ if (!adev)
+ return false;
+
if (obj->import_attach) {
struct dma_buf *dma_buf = obj->import_attach->dmabuf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 24c255e05079..e4ce33e69a48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -173,6 +173,7 @@ uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu;
char *amdgpu_virtual_display;
bool enforce_isolation;
+int amdgpu_modeset = -1;
/* Specifies the default granularity for SVM, used in buffer
* migration and restoration of backing memory when handling
@@ -1034,6 +1035,13 @@ module_param(enforce_isolation, bool, 0444);
MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute. enforce_isolation = on");
/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
+ */
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
+
+/**
* DOC: seamless (int)
* Seamless boot will keep the image on the screen during the boot process.
*/
@@ -2244,6 +2252,12 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
int ret, retry = 0, i;
bool supports_atomic = false;
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+ return -EINVAL;
+ }
+
/* skip devices which are owned by radeon */
for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
if (amdgpu_unsupported_pciidlist[i] == pdev->device)
@@ -2515,8 +2529,20 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+ /* don't allow a deep suspend the first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
return 0;
+ }
+
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2570,13 +2596,8 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
-
- r = amdgpu_device_resume(drm_dev, true);
- adev->in_s4 = false;
- return r;
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2589,9 +2610,6 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
-
- adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c1f35ded684e..506786784e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1411,9 +1411,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1424,9 +1426,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 1c19a65e6553..ef74259c448d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -678,12 +678,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
uint32_t flush_type, bool all_hub,
uint32_t inst)
{
- u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT :
- adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- int r;
+ int r, cnt = 0;
uint32_t seq;
/*
@@ -740,10 +738,21 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
dev_err(adev->dev, "timeout waiting for kiq fence\n");
r = -ETIME;
- }
+ } else
+ r = 0;
}
error_unlock_reset:
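The retry loop bounds the total wait instead of giving up after a single poll window. Assuming the usual amdgpu constants apply (MAX_KIQ_REG_WAIT = 5000 us of polling, MAX_KIQ_REG_BAILOUT_INTERVAL = 5 ms of sleep, MAX_KIQ_REG_TRY = 1000 retries, defined elsewhere in the driver), the worst case is roughly 1000 x (5 ms + 5 ms) = 10 s, with the CPU yielded via msleep() between polls rather than busy-waiting, and an early exit if a GPU reset is already pending.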
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 8e712a11aba5..92ab821afc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -209,7 +209,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
+ fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
if (!fences)
return -ENOMEM;
@@ -287,46 +287,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
(*id)->flushed_updates < updates ||
!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
- !dma_fence_is_signaled((*id)->last_flush))) {
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->owner != vm->immediate.fence_context ||
+ (!adev->vm_manager.concurrent_flush && needs_flush)) {
struct dma_fence *tmp;
- /* Wait for the gang to be assembled before using a
- * reserved VMID or otherwise the gang could deadlock.
+ /* Don't use per engine and per process VMID at the
+ * same time
*/
- tmp = amdgpu_device_get_gang(adev);
- if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+ /* to prevent one context being starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
*id = NULL;
- *fence = tmp;
+ *fence = dma_fence_get(tmp);
return 0;
}
- dma_fence_put(tmp);
-
- /* Make sure the id is owned by the gang before proceeding */
- if (!job->gang_submit ||
- (*id)->owner != vm->immediate.fence_context) {
-
- /* Don't use per engine and per process VMID at the
- * same time
- */
- if (adev->vm_manager.concurrent_flush)
- ring = NULL;
-
- /* to prevent one context starved by another context */
- (*id)->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
- if (tmp) {
- *id = NULL;
- *fence = dma_fence_get(tmp);
- return 0;
- }
- }
- needs_flush = true;
}
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
- r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -385,7 +373,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
return r;
@@ -437,7 +426,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
- &job->base.s_fence->finished);
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7d4395a5d8ac..b0a88f92cd82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -78,6 +78,9 @@ struct amdgpu_ih_ring {
#define amdgpu_ih_ts_after(t1, t2) \
(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
/* provided by the ih block */
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
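A quick standalone check of the wrap-safe compare (a hypothetical test program, not part of this patch): IH timestamps are 48 bits wide, so shifting both values left by 16 places the timestamp in the top bits of a signed 64-bit integer, and ordinary two's-complement subtraction then handles wraparound:

    #include <assert.h>
    #include <stdint.h>

    #define ih_ts_after_or_equal(t1, t2) \
        (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)

    int main(void)
    {
        uint64_t t1 = 0xFFFFFFFFFFFFULL; /* last tick before the 48-bit wrap */
        uint64_t t2 = 0x000000000001ULL; /* first tick after the wrap */

        assert(ih_ts_after_or_equal(t1, t2)); /* wrap handled correctly */
        assert(ih_ts_after_or_equal(t2, t2)); /* equality now accepted too */
        return 0;
    }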
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 100f04475943..685c61a05af8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -342,17 +342,24 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int r;
r = drm_sched_entity_error(s_entity);
if (r)
goto error;
- if (job->gang_submit)
+ if (job->gang_submit) {
fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
+
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
- if (!fence && job->vm && !job->vmid) {
+ if (job->vm && !job->vmid) {
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
if (r) {
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
@@ -365,9 +372,10 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
*/
if (!fence)
job->vm = NULL;
+ return fence;
}
- return fence;
+ return NULL;
error:
dma_fence_set_error(&job->base.s_fence->finished, r);
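The restructuring relies on the drm_sched contract that prepare_job() is called repeatedly, with each returned fence awaited before the next call; roughly (a sketch of the scheduler side as assumed here, not the actual scheduler source):

    /* dependencies are returned and consumed one at a time */
    while ((fence = ops->prepare_job(sched_job, entity)) != NULL)
        await(fence);   /* illustrative; drm_sched really registers a callback */
    ops->run_job(sched_job);

So the gang-switch fence, the new isolation fence, and the VMID-grab fence each get their turn, and returning NULL signals the job is ready to run.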
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 6fa20980a0b1..e4251d0691c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1335,14 +1335,14 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
DRM_ERROR("failed to do vm_bo_update on meta data\n");
goto error_del_bo_va;
}
- amdgpu_sync_fence(&sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(&sync, bo_va->last_pt_update, GFP_KERNEL);
r = amdgpu_vm_update_pdes(adev, vm, false);
if (r) {
DRM_ERROR("failed to update pdes on meta data\n");
goto error_del_bo_va;
}
- amdgpu_sync_fence(&sync, vm->last_update);
+ amdgpu_sync_fence(&sync, vm->last_update, GFP_KERNEL);
amdgpu_sync_wait(&sync, false);
drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 00752e3f9d8a..0b9987781f76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1295,28 +1295,36 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
- /* We only remove the fence if the resv has individualized. */
- WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
- && bo->base.resv != &bo->base._resv);
- if (bo->base.resv == &bo->base._resv)
- amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+ /*
+ * We lock the private dma_resv object here and since the BO is about to
+ * be released nobody else should have a pointer to it.
+ * So if this trylock fails, something is wrong with the reference
+ * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
- return;
+ goto out;
- if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
- return;
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
- r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
- if (!WARN_ON(r)) {
- amdgpu_vram_mgr_set_cleared(bo->resource);
- amdgpu_bo_fence(abo, fence, false);
- dma_fence_put(fence);
- }
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
- dma_resv_unlock(bo->base.resv);
+out:
+ dma_resv_unlock(&bo->base._resv);
}
/**
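The new sequence follows the canonical dma_resv pattern for adding a fence outside a reservation context; condensed (a generic sketch, error handling elided):

    if (dma_resv_trylock(resv)) {
        if (!dma_resv_reserve_fences(resv, 1)) {   /* 0 on success: slot reserved */
            dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
            dma_fence_put(fence);                  /* the resv holds its own reference */
        }
        dma_resv_unlock(resv);
    }

dma_resv_reserve_fences() must precede dma_resv_add_fence(), which is not allowed to allocate.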
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e5fc80ed06ea..6dded11a23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -44,7 +44,7 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
-#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
@@ -533,7 +533,6 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
@@ -543,8 +542,8 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
- kfree(cmd);
- cmd = NULL;
+ kfree(psp->cmd);
+ psp->cmd = NULL;
psp_free_shared_bufs(psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index f0924aa3f4e4..0c338dcdde48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1864,6 +1864,9 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
if (!obj || obj->attr_inuse)
return -EINVAL;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
+ return 0;
+
get_obj(obj);
snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 52c16bfeccaa..12ffe4a963d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -748,7 +748,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
/* Modify the header if the saved bad page count exceeds the threshold.
*/
if (amdgpu_bad_page_threshold != 0 &&
- control->ras_num_bad_pages >= ras->bad_page_cnt_threshold) {
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
@@ -806,7 +806,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
*/
if (amdgpu_bad_page_threshold != 0 &&
control->tbl_hdr.version == RAS_TABLE_VER_V2_1 &&
- control->ras_num_bad_pages < ras->bad_page_cnt_threshold)
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
control->ras_num_bad_pages) * 100) /
ras->bad_page_cnt_threshold;
@@ -1451,7 +1451,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
res);
return -EINVAL;
}
- if (ras->bad_page_cnt_threshold > control->ras_num_bad_pages) {
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
/* This means that, the threshold was increased since
* the last time the system was booted, and now,
* ras->bad_page_cnt_threshold - control->num_recs > 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586ab4c911b..34fc742fda91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -152,7 +152,8 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
*
* Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags)
{
struct amdgpu_sync_entry *e;
@@ -162,7 +163,7 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
if (amdgpu_sync_add_later(sync, f))
return 0;
- e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, flags);
if (!e)
return -ENOMEM;
@@ -249,7 +250,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *tmp = dma_fence_chain_contained(f);
if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
dma_fence_put(f);
if (r)
return r;
@@ -281,7 +282,7 @@ int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
continue;
- r = amdgpu_sync_fence(sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
if (r)
break;
}
@@ -388,7 +389,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
- r = amdgpu_sync_fence(clone, f);
+ r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
if (r)
return r;
} else {
@@ -400,6 +401,25 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
}
/**
+ * amdgpu_sync_move - move all fences from src to dst
+ *
+ * @src: source of the fences, empty after function
+ * @dst: destination for the fences
+ *
+ * Moves all fences from source to destination. All fences in destination are
+ * freed and source is empty after the function call.
+ */
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst)
+{
+ unsigned int i;
+
+ amdgpu_sync_free(dst);
+
+ for (i = 0; i < HASH_SIZE(src->fences); ++i)
+ hlist_move_list(&src->fences[i], &dst->fences[i]);
+}
+
+/**
* amdgpu_sync_push_to_job - push fences into job
* @sync: sync object to get the fences from
* @job: job to push the fences into
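A hypothetical caller of the new helper (names invented for illustration):

    struct amdgpu_sync staged, committed;

    amdgpu_sync_create(&staged);
    amdgpu_sync_create(&committed);
    r = amdgpu_sync_fence(&staged, fence, GFP_KERNEL);
    if (!r)
        amdgpu_sync_move(&staged, &committed);  /* committed's old entries are freed,
                                                 * staged is left empty */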
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index e3272dce798d..51eb4382c91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -47,7 +47,8 @@ struct amdgpu_sync {
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
@@ -56,6 +57,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst);
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index eafe20d8fe0b..0a1ef95b2866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -387,6 +387,45 @@ int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
return 0;
}
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+ void *data)
+{
+ uint32_t umc_node_inst;
+ uint32_t node_inst;
+ uint32_t umc_inst;
+ uint32_t ch_inst;
+ int ret;
+
+ /*
+ * This loop is based on the following:
+ * umc.active_mask = mask of active umc instances across all nodes
+ * umc.umc_inst_num = maximum number of umc instances per node
+ * umc.node_inst_num = maximum number of node instances
+ * Channel instances are not assumed to be harvested.
+ */
+ dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
+ adev->umc.active_mask, adev->umc.umc_inst_num);
+ for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+ adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+ node_inst = umc_node_inst / adev->umc.umc_inst_num;
+ umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+ LOOP_UMC_CH_INST(ch_inst) {
+ dev_dbg(adev->dev,
+ "node_inst :%d umc_inst: %d ch_inst: %d",
+ node_inst, umc_inst, ch_inst);
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev,
+ "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
umc_func func, void *data)
{
@@ -395,6 +434,9 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
uint32_t ch_inst = 0;
int ret = 0;
+ if (adev->aid_mask)
+ return amdgpu_umc_loop_all_aid(adev, func, data);
+
if (adev->umc.node_inst_num) {
LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
ret = func(adev, node_inst, umc_inst, ch_inst, data);
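To make the index decomposition above concrete: with umc_inst_num = 4 (an illustrative value), bit 9 set in active_mask maps to node_inst = 9 / 4 = 2 and umc_inst = 9 % 4 = 1, i.e. the second UMC instance on the third node, so the mask is a flat view over node_inst_num * umc_inst_num slots.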
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index adaf4388ad28..ce66a938f41a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -66,7 +66,6 @@
#define VCN_ENC_CMD_REG_WAIT 0x0000000c
#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
-#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_VID_IP_ADDRESS_2_0 0x0
#define VCN_AON_IP_ADDRESS_2_0 0x30000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13e5709ea1ca..e6f0152e5b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -1247,7 +1247,8 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block);
+ DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
}
@@ -1332,3 +1333,17 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
return 0;
}
+
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 0ca73343a768..0f3ccae5c1ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -407,4 +407,6 @@ bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
struct ras_err_data *err_data);
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 22aa4a8f1189..21be10d46cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -754,6 +754,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
@@ -761,8 +762,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool gds_switch_needed = ring->funcs->emit_gds_switch &&
job->gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush;
- struct dma_fence *fence = NULL;
+ bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
+ struct dma_fence *fence = NULL;
unsigned int patch;
int r;
@@ -785,8 +787,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
ring->funcs->emit_wreg;
+ cleaner_shader_needed = adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader && job->base.s_fence &&
+ &job->base.s_fence->scheduled == isolation->spearhead;
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
- !(job->enforce_isolation && !job->vmid))
+ !cleaner_shader_needed)
return 0;
amdgpu_ring_ib_begin(ring);
@@ -797,9 +803,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (adev->gfx.enable_cleaner_shader &&
- ring->funcs->emit_cleaner_shader &&
- job->enforce_isolation)
+ if (cleaner_shader_needed)
ring->funcs->emit_cleaner_shader(ring);
if (vm_flush_needed) {
@@ -821,7 +825,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
job->oa_size);
}
- if (vm_flush_needed || pasid_mapping_needed) {
+ if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
@@ -843,6 +847,17 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
id->pasid_mapping = dma_fence_get(fence);
mutex_unlock(&id_mgr->lock);
}
+
+ /*
+ * Make sure that all other submissions wait for the cleaner shader to
+ * finish before we push them to the HW.
+ */
+ if (cleaner_shader_needed) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(fence);
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
dma_fence_put(fence);
amdgpu_ring_patch_cond_exec(ring, patch);
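The spearhead acts as a serialization point: only the submission whose scheduled fence was picked as isolation->spearhead emits the cleaner shader, and its hardware fence then becomes the fence every later job must wait on. A sketch of the consumer side (the assumed shape of amdgpu_device_enforce_isolation() as used from prepare_job(), not its literal body):

    mutex_lock(&adev->enforce_isolation_mutex);
    fence = dma_fence_get(isolation->spearhead);
    mutex_unlock(&adev->enforce_isolation_mutex);
    return fence;    /* the scheduler waits on this before running the job */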
@@ -2672,20 +2687,6 @@ unreserve_bo:
return r;
}
-/**
- * amdgpu_vm_release_compute - release a compute vm
- * @adev: amdgpu_device pointer
- * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
- *
- * This is a correspondant of amdgpu_vm_make_compute. It decouples compute
- * pasid from vm. Compute should stop use of vm after this call.
- */
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
-{
- amdgpu_vm_set_pasid(adev, vm, 0);
- vm->is_compute_context = false;
-}
-
static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
{
for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 5010a3107bf8..f3ad687125ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -489,7 +489,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 5ba263fe5512..09178d56afbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4739,6 +4739,20 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(10, 1, 10):
+ adev->gfx.cleaner_shader_ptr = gfx_10_1_10_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_10_1_10_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 101 &&
+ adev->gfx.pfp_fw_version >= 158 &&
+ adev->gfx.mec_fw_version >= 152) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
@@ -6044,7 +6058,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6122,7 +6136,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6199,7 +6213,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6574,7 +6588,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
index 663c2572d440..5255378af53c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h
@@ -21,6 +21,41 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+/* Define the cleaner shader gfx_10_1_10 */
+static const u32 gfx_10_1_10_cleaner_shader_hex[] = {
+ 0xb0804004, 0xbf8a0000,
+ 0xbf068100, 0xbf840023,
+ 0xbe8203b8, 0xbefc0380,
+ 0x7e008480, 0x7e028480,
+ 0x7e048480, 0x7e068480,
+ 0x7e088480, 0x7e0a8480,
+ 0x7e0c8480, 0x7e0e8480,
+ 0xbefc0302, 0x80828802,
+ 0xbf84fff5, 0xbe8203ff,
+ 0x80000000, 0x87020102,
+ 0xbf840012, 0xbefe03c1,
+ 0xbeff03c1, 0xd7650001,
+ 0x0001007f, 0xd7660001,
+ 0x0002027e, 0x16020288,
+ 0xbe8203bf, 0xbefc03c1,
+ 0xd9382000, 0x00020201,
+ 0xd9386040, 0x00040401,
+ 0xd70f6a01, 0x000202ff,
+ 0x00000400, 0x80828102,
+ 0xbf84fff7, 0xbefc03ff,
+ 0x00000068, 0xbe803080,
+ 0xbe813080, 0xbe823080,
+ 0xbe833080, 0x80fc847c,
+ 0xbf84fffa, 0xbeea0480,
+ 0xbeec0480, 0xbeee0480,
+ 0xbef00480, 0xbef20480,
+ 0xbef40480, 0xbef60480,
+ 0xbef80480, 0xbefa0480,
+ 0xbf810000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0xbf9f0000,
+};
+
/* Define the cleaner shader gfx_10_3_0 */
static const u32 gfx_10_3_0_cleaner_shader_hex[] = {
0xb0804004, 0xbf8a0000,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
new file mode 100644
index 000000000000..9ba3359253c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+// This shader cleans LDS, SGPRs and VGPRs. It is the first 64 dwords (256 bytes) of the 256-dword cleaner shader.
+
+// GFX10.1 : Clear SGPRs, VGPRs and LDS
+// Launch 32 waves per CU (16 per SIMD) as a workgroup (threadgroup) to fill every wave slot
+// Waves are "wave32" and have 64 VGPRs each, which uses all 1024 VGPRs per SIMD
+// Waves are launched in "CU" mode, and the workgroup shares 64KB of LDS (half of the WGP's LDS)
+// It takes 2 workgroups to use all of LDS: one on each CU of the WGP
+// Each wave clears SGPRs 0 - 107
+// Each wave clears VGPRs 0 - 63
+// The first wave of the workgroup clears its 64KB of LDS
+// The shader starts with "S_BARRIER" to ensure SPI has launched all waves of the workgroup
+// before any wave in the workgroup could end. Without this, it is possible that not all SGPRs get cleared.
+
+
+shader main
+ asic(GFX10.1)
+ type(CS)
+ wave_size(32)
+// Note: original source code from SQ team
+
+//
+// Create 32 waves in a threadgroup (CS waves)
+// Each allocates 64 VGPRs
+// The workgroup allocates all of LDS (64kbytes)
+//
+// Takes about 2500 clocks to run.
+// (theoretical fastest = 1024 clks vgpr + 640 clks lds = 1664 clks)
+//
+ S_BARRIER
+ s_cmp_eq_u32 s0, 1 // FW sets sgpr0 = 1 via COMPUTE_USER_DATA_0 when VGPRs and LDS must be cleared
+ s_cbranch_scc0 label_0023 // Clean VGPRs and LDS if sgpr0 of wave is set, scc = (s0 == 1)
+
+ s_mov_b32 s2, 0x00000038 // Loop 64/8=8 times (loop unrolled for performance)
+ s_mov_b32 m0, 0
+ //
+ // CLEAR VGPRs
+ //
+label_0005:
+ v_movreld_b32 v0, 0
+ v_movreld_b32 v1, 0
+ v_movreld_b32 v2, 0
+ v_movreld_b32 v3, 0
+ v_movreld_b32 v4, 0
+ v_movreld_b32 v5, 0
+ v_movreld_b32 v6, 0
+ v_movreld_b32 v7, 0
+ s_mov_b32 m0, s2
+ s_sub_u32 s2, s2, 8
+ s_cbranch_scc0 label_0005
+ //
+ s_mov_b32 s2, 0x80000000 // Bit31 is first_wave
+ s_and_b32 s2, s2, s0 // sgpr0 carries the tg_size (first_wave) bit, since ucode sets only COMPUTE_PGM_RSRC2.tg_size_en
+ s_cbranch_scc0 label_0023 // Clean LDS if it's the first wave of the ThreadGroup/WorkGroup
+ // CLEAR LDS
+ //
+ s_mov_b32 exec_lo, 0xffffffff
+ s_mov_b32 exec_hi, 0xffffffff
+ v_mbcnt_lo_u32_b32 v1, exec_hi, 0 // Set V1 to thread-ID (0..63)
+ v_mbcnt_hi_u32_b32 v1, exec_lo, v1 // Set V1 to thread-ID (0..63)
+ v_mul_u32_u24 v1, 0x00000008, v1 // * 8, so each thread is a double-dword address (8byte)
+ s_mov_b32 s2, 0x00000003f // 64 loop iterations
+ s_mov_b32 m0, 0xffffffff
+ // Clear all of LDS space
+ // Each FirstWave of WorkGroup clears 64kbyte block
+
+label_001F:
+ ds_write2_b64 v1, v[2:3], v[2:3] offset1:32
+ ds_write2_b64 v1, v[4:5], v[4:5] offset0:64 offset1:96
+ v_add_co_u32 v1, vcc, 0x00000400, v1
+ s_sub_u32 s2, s2, 1
+ s_cbranch_scc0 label_001F
+
+ //
+ // CLEAR SGPRs
+ //
+label_0023:
+ s_mov_b32 m0, 0x00000068 // Loop 108/4=27 times (loop unrolled for performance)
+label_sgpr_loop:
+ s_movreld_b32 s0, 0
+ s_movreld_b32 s1, 0
+ s_movreld_b32 s2, 0
+ s_movreld_b32 s3, 0
+ s_sub_u32 m0, m0, 4
+ s_cbranch_scc0 label_sgpr_loop
+
+ //clear vcc
+ s_mov_b64 vcc, 0 //clear vcc
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_lo, 0 //clear flat scratch lo SGPR
+ //s_setreg_imm32_b32 hw_reg_shader_flat_scratch_hi, 0 //clear flat scratch hi SGPR
+ s_mov_b64 ttmp0, 0 //Clear ttmp0 and ttmp1
+ s_mov_b64 ttmp2, 0 //Clear ttmp2 and ttmp3
+ s_mov_b64 ttmp4, 0 //Clear ttmp4 and ttmp5
+ s_mov_b64 ttmp6, 0 //Clear ttmp6 and ttmp7
+ s_mov_b64 ttmp8, 0 //Clear ttmp8 and ttmp9
+ s_mov_b64 ttmp10, 0 //Clear ttmp10 and ttmp11
+ s_mov_b64 ttmp12, 0 //Clear ttmp12 and ttmp13
+ s_mov_b64 ttmp14, 0 //Clear ttmp14 and ttmp15
+
+ s_endpgm
+
+end
+
+
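Checking the occupancy math in the header comment above: 16 waves per SIMD x 64 VGPRs per wave = 1024 VGPRs, the whole GFX10.1 per-SIMD register file, which is why no other wave can be resident while the cleaner runs. The unrolled loops also match their comments: the VGPR loop clears 8 registers per pass for 64/8 = 8 passes, and the SGPR loop clears 4 per pass for 108/4 = 27 passes.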
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index cfb51baa581a..e050c2e4ea73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -64,6 +64,23 @@
#define regPC_CONFIG_CNTL_1 0x194d
#define regPC_CONFIG_CNTL_1_BASE_IDX 1
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00a00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
@@ -2391,7 +2408,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2435,7 +2452,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2480,7 +2497,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3115,7 +3132,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3333,7 +3350,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -3958,7 +3975,7 @@ static void gfx_v11_0_gfx_mqd_set_priority(struct amdgpu_device *adev,
if (prop->hqd_pipe_priority == AMDGPU_GFX_PIPE_PRIO_HIGH)
priority = 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, priority);
mqd->cp_gfx_hqd_queue_priority = tmp;
}
@@ -3980,14 +3997,14 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
@@ -3995,7 +4012,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
gfx_v11_0_gfx_mqd_set_priority(adev, mqd, prop);
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -4017,7 +4034,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
@@ -4026,7 +4043,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4038,7 +4055,7 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
@@ -4124,14 +4141,14 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -4160,7 +4177,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -4170,7 +4187,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -4196,7 +4213,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -4211,17 +4228,17 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -4549,7 +4566,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index c21b168f75a7..2ec900d50d7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -52,6 +52,24 @@
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+#define regCP_GFX_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_GFX_HQD_VMID_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT 0x00000000
+#define regCP_GFX_HQD_QUANTUM_DEFAULT 0x00000a01
+#define regCP_GFX_HQD_CNTL_DEFAULT 0x00f00000
+#define regCP_RB_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_GFX_HQD_RPTR_DEFAULT 0x00000000
+
+#define regCP_HQD_EOP_CONTROL_DEFAULT 0x00000006
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_MQD_CONTROL_DEFAULT 0x00000100
+#define regCP_HQD_PQ_CONTROL_DEFAULT 0x00308509
+#define regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT 0x00000000
+#define regCP_HQD_PQ_RPTR_DEFAULT 0x00000000
+#define regCP_HQD_PERSISTENT_STATE_DEFAULT 0x0be05501
+#define regCP_HQD_IB_CONTROL_DEFAULT 0x00300000
+
+
MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
@@ -2306,7 +2324,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2450,7 +2468,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -2891,25 +2909,25 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set up mqd control */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
+ tmp = regCP_GFX_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
mqd->cp_gfx_mqd_control = tmp;
/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
+ tmp = regCP_GFX_HQD_VMID_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
mqd->cp_gfx_hqd_vmid = 0;
/* set up default queue priority level
* 0x0 = low priority, 0x1 = high priority */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
+ tmp = regCP_GFX_HQD_QUEUE_PRIORITY_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
mqd->cp_gfx_hqd_queue_priority = tmp;
/* set up time quantum */
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
+ tmp = regCP_GFX_HQD_QUANTUM_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
mqd->cp_gfx_hqd_quantum = tmp;
@@ -2931,7 +2949,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
/* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
- tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
+ tmp = regCP_GFX_HQD_CNTL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
@@ -2940,7 +2958,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
- tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
+ tmp = regCP_RB_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -2952,7 +2970,7 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_rb_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
+ mqd->cp_gfx_hqd_rptr = regCP_GFX_HQD_RPTR_DEFAULT;
/* active the queue */
mqd->cp_gfx_hqd_active = 1;
@@ -3047,14 +3065,14 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
if (prop->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3083,7 +3101,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
/* set MQD vmid to 0 */
- tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
@@ -3093,7 +3111,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(prop->queue_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
@@ -3118,7 +3136,7 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = 0;
/* enable the doorbell if requested */
if (prop->use_doorbell) {
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+ tmp = regCP_HQD_PQ_DOORBELL_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, prop->doorbell_index);
@@ -3133,17 +3151,17 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
- mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+ mqd->cp_hqd_pq_rptr = regCP_HQD_PQ_RPTR_DEFAULT;
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
- tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = regCP_HQD_IB_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
@@ -3469,7 +3487,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 0e3ddea7b8e0..a7bfc9f41d0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9bedca9a79c6..a88ad9951d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -969,7 +969,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72751ab4c766..0ba82eabca02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -750,6 +750,18 @@ static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
+ /* The mall_size is already calculated as mall_size_per_umc * num_umc.
+ * However, for gfx1151, which features a 2-to-1 UMC mapping,
+ * the result must be multiplied by 2 to determine the actual mall size.
+ */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 5, 1):
+ adev->gmc.mall_size *= 2;
+ break;
+ default:
+ break;
+ }
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
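With illustrative numbers: if mall_size_per_umc were 2 MiB and num_umc 4, the generic product would report 8 MiB, but gfx1151's 2-to-1 UMC mapping means the physical MALL is twice that, 16 MiB, hence the *= 2 in the IP_VERSION(11, 5, 1) case.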
@@ -896,7 +908,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index c3c144a4f45e..0f136d6bbdc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -297,7 +297,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -881,7 +881,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 291549765c38..f1dc9e50d67e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1504,7 +1504,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.node_inst_num /= UMC_V12_0_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
- adev->umc.active_mask = adev->aid_mask;
adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
adev->umc.ras = &umc_v12_0_ras;
@@ -2434,7 +2433,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 194026e9be33..1ca1bbe7784e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -42,7 +42,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
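The readback is the standard idiom for posting an MMIO write; a generic sketch (FLUSH_OFFSET and SAFE_OFFSET are hypothetical placeholders, not amdgpu registers):

    /* writes to a posted bus may linger in write buffers; reading any
     * register on the same device forces them to complete first */
    writel(0, base + FLUSH_OFFSET);
    (void)readl(base + SAFE_OFFSET);

The patch only swaps which register is read back: the remapped HDP flush register misbehaves on some platforms, while the NBIO memsize register is always safe to read.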
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index d3962d469088..40705e13ca56 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -33,7 +33,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
index f52552c5fa27..6b9f2e1d9d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
@@ -34,7 +34,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
- RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ if (amdgpu_sriov_vf(adev)) {
+ /* this is fine because SR_IOV doesn't remap the register */
+ RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ } else {
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ }
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
index 6948fe9956ce..20da813299f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
@@ -36,7 +36,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
index 63820329f67e..f7ecdd15d528 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c
@@ -33,7 +33,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
- RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+ /* We just need to read back a register to post the write.
+ * Reading back the remapped register causes problems on
+ * some platforms so just read back the memory size register.
+ */
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 88f9771c1686..b2904ee494e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -637,7 +637,7 @@ static uint64_t jpeg_v4_0_3_dec_ring_get_wptr(struct amdgpu_ring *ring)
ring->pipe ? (0x40 * ring->pipe - 0xc80) : 0);
}
-static void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* JPEG engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through JPEG ring.
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
index 747a3e5f6856..a90bf370a002 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
@@ -56,6 +56,7 @@ void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
unsigned int flags);
void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr);
+void jpeg_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 40d4c32a8c2a..f2cc11b3fd68 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -655,6 +655,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
+ .emit_hdp_flush = jpeg_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v4_0_3_dec_ring_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 0f808ffcab94..68bb334393bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -730,7 +730,7 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
- int size = 128 * PAGE_SIZE;
+ int size = 128 * AMDGPU_GPU_PAGE_SIZE;
int ret = 0;
struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;
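
This one-liner matters because PAGE_SIZE is the host CPU's page size (4K on x86, but 16K or 64K on some arm64 and powerpc configurations), while AMDGPU_GPU_PAGE_SIZE is the GPU's fixed 4K page, and this firmware buffer is specified in GPU pages. Sizing it in CPU pages over-allocates by up to 16x on large-page kernels; a hedged sketch of the arithmetic, with the constants assumed rather than taken from the headers:

    /* Assumed values: the CPU page size varies per kernel config,
     * the GPU page size does not. */
    #define CPU_PAGE_SIZE_64K   65536
    #define AMDGPU_GPU_PAGE_SZ  4096

    /* 128 GPU pages = 512 KiB on every host; 128 CPU pages would be
     * 8 MiB on a 64K-page kernel. */
    static const int hw_res1_size = 128 * AMDGPU_GPU_PAGE_SZ;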
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 9689e2b5d4e5..2adee2b94c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -172,6 +172,30 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
}
+/* Set the snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_7_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ for (i = 0; i < 5; i++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, i * distance, tmp);
+ }
+
+}
+
static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
@@ -337,6 +361,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
mmhub_v1_7_init_system_aperture_regs(adev);
mmhub_v1_7_init_tlb_regs(adev);
mmhub_v1_7_init_cache_regs(adev);
+ mmhub_v1_7_init_snoop_override_regs(adev);
mmhub_v1_7_enable_system_domain(adev);
mmhub_v1_7_disable_identity_aperture(adev);
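
Each DAGB instance carries its own copy of the WRCLI_GPU_SNOOP_OVERRIDE register pair at a fixed stride, so the new helper derives the stride from two adjacent instance offsets and then read-modify-writes bit 15 (the SDMA client) in every copy, leaving the other client bits untouched. A self-contained sketch of that replicated-register RMW pattern, with a fake register file in place of the SOC15 accessors:

    #include <stdint.h>

    static uint32_t regs[0x100];                            /* fake register file */
    static uint32_t reg_read(uint32_t r)          { return regs[r]; }
    static void reg_write(uint32_t r, uint32_t v) { regs[r] = v; }

    #define SDMA_CLIENT_BIT (1u << 15)

    /* Set one bit in every instance of a register replicated at a stride. */
    static void set_bit_all_instances(uint32_t reg0, uint32_t reg1, int n)
    {
            uint32_t stride = reg1 - reg0;  /* offset between instance copies */

            for (int i = 0; i < n; i++) {
                    uint32_t r = reg0 + i * stride;

                    reg_write(r, reg_read(r) | SDMA_CLIENT_BIT);
            }
    }

The mmhub_v1_8 and mmhub_v9_4 hunks below apply the same pattern, additionally iterating over AIDs or hub instances.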
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
index e646e5cef0a2..ce013a715b86 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
@@ -213,6 +213,32 @@ static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
}
}
+/* Set the snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v1_8_init_snoop_override_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp, inst_mask;
+ int i, j;
+ uint32_t distance = regDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+
+ inst_mask = adev->aid_mask;
+ for_each_inst(i, inst_mask) {
+ for (j = 0; j < 5; j++) { /* DAGB instances */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE, j * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, i,
+ regDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE, j * distance, tmp);
+ }
+ }
+}
+
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp, inst_mask;
@@ -418,6 +444,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
mmhub_v1_8_init_system_aperture_regs(adev);
mmhub_v1_8_init_tlb_regs(adev);
mmhub_v1_8_init_cache_regs(adev);
+ mmhub_v1_8_init_snoop_override_regs(adev);
mmhub_v1_8_enable_system_domain(adev);
mmhub_v1_8_disable_identity_aperture(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index ff1b58e44689..fe0710b55c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -198,6 +198,36 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
+/* Set the snoop bit for SDMA so that SDMA writes probe-invalidate RW lines */
+static void mmhub_v9_4_init_snoop_override_regs(struct amdgpu_device *adev, int hubid)
+{
+ uint32_t tmp;
+ int i;
+ uint32_t distance = mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE -
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE;
+ uint32_t huboffset = hubid * MMHUB_INSTANCE_REGISTER_OFFSET;
+
+ for (i = 0; i < 5 - (2 * hubid); i++) {
+ /* DAGB instances 0 to 4 are in hub0 and 5 to 7 are in hub1 */
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance);
+ tmp |= (1 << 15); /* SDMA client is BIT15 */
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE,
+ huboffset + i * distance, tmp);
+
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance);
+ tmp |= (1 << 15);
+ WREG32_SOC15_OFFSET(MMHUB, 0,
+ mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE,
+ huboffset + i * distance, tmp);
+ }
+
+}
+
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
uint32_t tmp;
@@ -392,6 +422,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_init_cache_regs(adev, i);
+ mmhub_v9_4_init_snoop_override_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_disable_identity_aperture(adev, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 41421da63a08..a11f556b3ff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -361,7 +361,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
+#define MMIO_REG_HOLE_OFFSET 0x44000
static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev)
{
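
The define swap above removes a PAGE_SIZE dependency: the remap window used to be computed relative to the kernel's CPU page size, so the register hole silently moved between kernel configurations. The hunk does not say why 0x44000 specifically is safe, only that a fixed offset is; the arithmetic that motivated the change:

    /* With 4K pages the old formula gave 0x7f000; with 64K pages it gave
     * 0x70000, relocating the remap window between kernel configs. */
    _Static_assert(0x80000 - 0x1000  == 0x7f000, "4K-page kernel");
    _Static_assert(0x80000 - 0x10000 == 0x70000, "64K-page kernel");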
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 95c609317a8d..efca7dc27d68 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -141,23 +141,23 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
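
These SR-IOV decode tables are being corrected to the hardware's actual limits: the legacy codecs (MPEG2, MPEG4, VC1) top out at 1920x1088, while JPEG decodes up to 16384x16384. Each codec_info_build() entry packs a codec index with its maximum width, height, and level; a hedged sketch of what such an entry might expand to, with the struct fields assumed rather than copied from the amdgpu headers:

    #include <stdint.h>

    /* Assumed shape of the table entries; field names are illustrative. */
    struct codec_caps {
            uint32_t codec_type;
            uint32_t max_width;
            uint32_t max_height;
            uint32_t max_level;
    };

    #define caps_entry(type, w, h, lvl) \
            { .codec_type = (type), .max_width = (w), \
              .max_height = (h), .max_level = (lvl) }

    static const struct codec_caps example_caps[] = {
            caps_entry(0 /* MPEG2 */, 1920, 1088, 3),
            caps_entry(6 /* JPEG */, 16384, 16384, 0),
    };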
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 2395f1856962..e77a467af7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -532,7 +532,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
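
Replacing the direct adev->hdp.funcs->flush_hdp() call with amdgpu_device_flush_hdp() (repeated in the psp_v13_0 and psp_v14_0 hunks below) routes every flush through one device-level helper, which can centralize preconditions instead of duplicating them at each call site. The helper's body is not shown in this diff; a sketch of the wrapper pattern only, with invented names:

    /* Sketch of the indirection, not the real amdgpu helper. */
    struct dev_ctx {
            void (*flush_hdp)(struct dev_ctx *ctx, void *ring);
            int hw_inaccessible;    /* e.g. device unplugged or passed through */
    };

    static void dev_flush_hdp(struct dev_ctx *ctx, void *ring)
    {
            if (ctx->hw_inaccessible || !ctx->flush_hdp)
                    return;         /* one central place for such checks */
            ctx->flush_hdp(ctx, ring);
    }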
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index cc621064610f..afdf8ce3b4c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -610,7 +610,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 4d33c95a5116..89f6c06946c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -488,7 +488,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index e98fb3fa36a8..6e09613de8cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -604,12 +604,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
{
/* Will reset for the following suspend abort cases.
- * 1) Only reset on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend aborted in the normal S3 suspend or
- * performing pm core test.
+	 * 1) S3 suspend aborted during normal S3 suspend.
+	 * 2) S3 suspend aborted while performing the pm core test.
*/
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !pm_resume_via_firmware())
+ if (adev->in_s3 && !pm_resume_via_firmware())
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 62ad67d0b598..c66cff399f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -117,23 +117,17 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e42cfc731ad8..f40737d27cb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b518202955ca..2431e1914a8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -39,6 +39,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 63ddd4cca910..02c2defcf91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -40,6 +40,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 00551d6f0370..090794457339 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
#define VCN_HARVEST_MMSCH 0
@@ -582,7 +583,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index ecdc027f8220..855da1149c5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "mmsch_v4_0_3.h"
#include "vcn/vcn_4_0_3_offset.h"
@@ -44,6 +45,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_AON_SOC_ADDRESS_3_0 0x48000
static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = {
SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
@@ -1461,8 +1463,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
regUVD_RB_WPTR);
}
-static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
- uint32_t val, uint32_t mask)
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1474,7 +1476,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
{
/* Use normalized offsets when required */
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1485,8 +1488,8 @@ static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg
amdgpu_ring_write(ring, val);
}
-static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vmid, uint64_t pd_addr)
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
@@ -1498,7 +1501,7 @@ static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
lower_32_bits(pd_addr), 0xffffffff);
}
-static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
* This is a workaround to avoid any HDP flush through VCN ring.
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 0b046114373a..03572a1d0c9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -26,4 +26,13 @@
extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val);
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vmid, uint64_t pd_addr);
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
+
#endif /* __VCN_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 23d3c16c9d9f..c9761d27fd61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -46,6 +46,7 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
+#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -982,6 +983,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
VCN_RB1_DB_CTRL__EN_MASK);
+	/* Keep one read-back to ensure all register writes are done;
+	 * otherwise it may introduce race conditions. */
+ RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
+
return 0;
}
@@ -990,183 +995,182 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
* vcn_v4_0_5_start - VCN start
*
* @adev: amdgpu_device pointer
+ * @i: instance to start
*
* Start VCN block
*/
-static int vcn_v4_0_5_start(struct amdgpu_device *adev)
+static int vcn_v4_0_5_start(struct amdgpu_device *adev, int i)
{
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
- int i, j, k, r;
+ int j, k, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, true, i);
- }
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, true, i);
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- continue;
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+ return vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
- /* disable VCN power gating */
- vcn_v4_0_5_disable_static_power_gating(adev, i);
-
- /* set VCN status busy */
- tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
-
- /*SW clock gating */
- vcn_v4_0_5_disable_clock_gating(adev, i);
-
- /* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
-
- /* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
-
- /* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
-
- /* setup regUVD_LMI_CTRL */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
- UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
- UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
- UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
- UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-
- /* setup regUVD_MPC_CNTL */
- tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
- tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
- tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
- WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
-
- /* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
- ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
- ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
- (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
- (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
-
- /* setup UVD_MPC_SET_MUX */
- WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
- ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
- (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
- (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
-
- vcn_v4_0_5_mc_resume(adev, i);
-
- /* VCN global tiling registers */
- WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
- adev->gfx.config.gb_addr_config);
-
- /* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
-
- /* release VCPU reset to boot */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- for (j = 0; j < 10; ++j) {
- uint32_t status;
-
- for (k = 0; k < 100; ++k) {
- status = RREG32_SOC15(VCN, i, regUVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- if (amdgpu_emu_mode == 1)
- msleep(1);
- }
+ /* disable VCN power gating */
+ vcn_v4_0_5_disable_static_power_gating(adev, i);
+
+ /* set VCN status busy */
+ tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
+
+ /* SW clock gating */
+ vcn_v4_0_5_disable_clock_gating(adev, i);
+
+ /* enable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
+
+ /* disable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- if (amdgpu_emu_mode == 1) {
- r = -1;
- if (status & 2) {
- r = 0;
- break;
- }
- } else {
+ /* enable LMI MC and UMC channels */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+
+ /* setup regUVD_LMI_CTRL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
+
+ /* setup regUVD_MPC_CNTL */
+ tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
+ tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
+ tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
+ WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
+
+ /* setup UVD_MPC_SET_MUXA0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
+ ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUXB0 */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
+ ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+ (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+ (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
+
+ /* setup UVD_MPC_SET_MUX */
+ WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
+ ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+ (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+ (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
+
+ vcn_v4_0_5_mc_resume(adev, i);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
+ adev->gfx.config.gb_addr_config);
+
+ /* unblock VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+
+ /* release VCPU reset to boot */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ for (j = 0; j < 10; ++j) {
+ uint32_t status;
+
+ for (k = 0; k < 100; ++k) {
+ status = RREG32_SOC15(VCN, i, regUVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ if (amdgpu_emu_mode == 1)
+ msleep(1);
+ }
+
+ if (amdgpu_emu_mode == 1) {
+ r = -1;
+ if (status & 2) {
r = 0;
- if (status & 2)
- break;
-
- dev_err(adev->dev,
- "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
- mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
-
- mdelay(10);
- r = -1;
+ break;
}
+ } else {
+ r = 0;
+ if (status & 2)
+ break;
+
+ dev_err(adev->dev,
+ "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ mdelay(10);
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
+
+ mdelay(10);
+ r = -1;
}
+ }
- if (r) {
- dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
- return r;
- }
+ if (r) {
+ dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
+ return r;
+ }
- /* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
- UVD_MASTINT_EN__VCPU_EN_MASK,
- ~UVD_MASTINT_EN__VCPU_EN_MASK);
+ /* enable master interrupt */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
+ UVD_MASTINT_EN__VCPU_EN_MASK,
+ ~UVD_MASTINT_EN__VCPU_EN_MASK);
- /* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
- ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
+ /* clear the busy bit of VCN_STATUS */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
+ ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
- ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
- WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
-
- tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
- WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
- ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
-
- tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
- tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
- WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
- }
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
+ ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+ WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
+
+ tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
+ WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
+ ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
+
+ tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
+ tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
+ WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
+ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+	/* Keep one read-back to ensure all register writes are done;
+	 * otherwise it may introduce race conditions. */
+ RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
return 0;
}
@@ -1203,88 +1207,87 @@ static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
* vcn_v4_0_5_stop - VCN stop
*
* @adev: amdgpu_device pointer
+ * @i: instance to stop
*
* Stop VCN block
*/
-static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
+static int vcn_v4_0_5_stop(struct amdgpu_device *adev, int i)
{
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
- int i, r = 0;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ int r = 0;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
- vcn_v4_0_5_stop_dpg_mode(adev, i);
- continue;
- }
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
- /* wait for vcn idle */
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
- if (r)
- return r;
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ vcn_v4_0_5_stop_dpg_mode(adev, i);
+ r = 0;
+ goto done;
+ }
- tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__READ_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_MASK |
- UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ /* wait for vcn idle */
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
+ if (r)
+ goto done;
- /* disable LMI UMC channel */
- tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
- tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
- WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
- tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
- UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
- r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
- if (r)
- return r;
+ tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__READ_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_MASK |
+ UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
+
+ /* disable LMI UMC channel */
+ tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
+ tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+ WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
+ tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
+ UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
+ r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
+ if (r)
+ goto done;
- /* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
- UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
- ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
+ /* block VCPU register access */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
+ UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
+ ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- /* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ /* reset VCPU */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
- /* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
- ~(UVD_VCPU_CNTL__CLK_EN_MASK));
+ /* disable VCPU clock */
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
+ ~(UVD_VCPU_CNTL__CLK_EN_MASK));
- /* apply soft reset */
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
- tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
- WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ /* apply soft reset */
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
+ tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+ WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
- /* clear status */
- WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
+ /* clear status */
+ WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
- /* apply HW clock gating */
- vcn_v4_0_5_enable_clock_gating(adev, i);
+ /* apply HW clock gating */
+ vcn_v4_0_5_enable_clock_gating(adev, i);
- /* enable VCN power gating */
- vcn_v4_0_5_enable_static_power_gating(adev, i);
- }
+ /* enable VCN power gating */
+ vcn_v4_0_5_enable_static_power_gating(adev, i);
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->pm.dpm_enabled)
- amdgpu_dpm_enable_vcn(adev, false, i);
- }
+done:
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vcn(adev, false, i);
- return 0;
+ return r;
}
/**
@@ -1536,15 +1539,17 @@ static int vcn_v4_0_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- int ret;
+ int ret = 0, i;
if (state == adev->vcn.cur_state)
return 0;
- if (state == AMD_PG_STATE_GATE)
- ret = vcn_v4_0_5_stop(adev);
- else
- ret = vcn_v4_0_5_start(adev);
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (state == AMD_PG_STATE_GATE)
+ ret |= vcn_v4_0_5_stop(adev, i);
+ else
+ ret |= vcn_v4_0_5_start(adev, i);
+ }
if (!ret)
adev->vcn.cur_state = state;
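
The shape of the refactor is visible in this last hunk: vcn_v4_0_5_start() and vcn_v4_0_5_stop() now act on a single instance, and the powergating callback owns the loop, OR-ing return codes so a failure on any instance is reported. OR-ing errno values is lossy (two distinct errors can combine into a meaningless number) but preserves the only property the caller tests, nonzero-on-failure. A compact sketch of the dispatch:

    /* Per-instance ops plus an iterating dispatcher, mirroring the refactor. */
    static int vcn_start_one(int inst) { /* ... */ (void)inst; return 0; }
    static int vcn_stop_one(int inst)  { /* ... */ (void)inst; return 0; }

    static int vcn_set_powergating(int num_inst, int gate)
    {
            int ret = 0;

            for (int i = 0; i < num_inst; i++)
                    ret |= gate ? vcn_stop_one(i) : vcn_start_one(i);

            return ret;             /* nonzero if any instance failed */
    }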
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index b6d78381ebfb..97fc3d5b1947 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -502,7 +502,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/* VCN global tiling registers */
WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 8b0b3739a537..f893a8428283 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
@@ -65,6 +66,22 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
return amdgpu_vcn_early_init(adev);
}
+static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
+{
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+
+ fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+
+ if (fw_shared->sq.is_enabled)
+ return;
+ fw_shared->present_flag_0 =
+ cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
+ fw_shared->sq.is_enabled = 1;
+
+ if (amdgpu_vcnfw_log)
+ amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]);
+}
+
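
Factoring the fw_shared setup into vcn_v5_0_1_fw_shared_init() and guarding it with sq.is_enabled makes the initialization idempotent: sw_init populates the buffer once, and the hw_init call added further down re-populates it only if it was lost (for example across a reset), without re-running the fwlog setup. The guard pattern in isolation, with a simplified struct:

    #include <stdint.h>

    struct fw_shared {
            uint32_t present_flag_0;
            uint8_t  sq_is_enabled;
    };

    /* Idempotent init: safe to call from both sw_init and hw_init. */
    static void fw_shared_init(struct fw_shared *fws)
    {
            if (fws->sq_is_enabled)
                    return;                 /* already valid; nothing to redo */
            fws->present_flag_0 = 1u << 2;  /* flag value is illustrative */
            fws->sq_is_enabled = 1;
    }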
/**
* vcn_v5_0_1_sw_init - sw init for VCN block
*
@@ -95,8 +112,6 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
-
vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
@@ -111,12 +126,7 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
- fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
- fw_shared->sq.is_enabled = true;
-
- if (amdgpu_vcnfw_log)
- amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+ vcn_v5_0_1_fw_shared_init(adev, i);
}
/* TODO: Add queue reset mask when FW fully supports it */
@@ -188,6 +198,9 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
9 * vcn_inst),
adev->vcn.inst[i].aid_id);
+ /* Re-init fw_shared, if required */
+ vcn_v5_0_1_fw_shared_init(adev, i);
+
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -893,16 +906,17 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
- .emit_frame_size =
- SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
- SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
- 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
- 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
- 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 +
+ 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
- .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+ .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_unified_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -910,8 +924,8 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
- .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
- .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 795382b55e0a..981d9adcc5e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -107,20 +107,30 @@ static void cik_event_interrupt_wq(struct kfd_node *dev,
kfd_signal_hw_exception_event(pasid);
else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_process_device *pdd = NULL;
struct kfd_vm_fault_info info;
+ struct kfd_process *p;
kfd_smi_event_update_vmfault(dev, pasid);
- kfd_dqm_evict_pasid(dev->dqm, pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
+ if (!pdd)
+ return;
+
+ kfd_evict_process_device(pdd);
memset(&info, 0, sizeof(info));
amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
- if (!info.page_addr && !info.status)
+ if (!info.page_addr && !info.status) {
+ kfd_unref_process(p);
return;
+ }
if (info.vmid == vmid)
- kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
+ kfd_signal_vm_fault_event(pdd, &info, NULL);
else
- kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
+		kfd_signal_vm_fault_event(pdd, NULL, NULL);
+
+ kfd_unref_process(p);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 33df35cab467..8c2e92378b49 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -155,8 +155,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
/* filep now owns the reference returned by kfd_create_process */
filep->private_data = process;
- dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
- process->pasid, process->is_32bit_user_mode);
+ dev_dbg(kfd_device, "process pid %d opened kfd node, compat mode (32 bit) - %d\n",
+ process->lead_thread->pid, process->is_32bit_user_mode);
return 0;
}
@@ -366,8 +366,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_acquire_queue_buf;
}
- pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
- p->pasid,
+ pr_debug("Creating queue for process pid %d on gpu 0x%x\n",
+ p->lead_thread->pid,
dev->id);
err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
@@ -420,9 +420,9 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid 0x%x\n",
+ pr_debug("Destroying queue id %d for process pid %d\n",
args->queue_id,
- p->pasid);
+ p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -478,8 +478,8 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid 0x%x\n",
- args->queue_id, p->pasid);
+ pr_debug("Updating queue id %d for process pid %d\n",
+ args->queue_id, p->lead_thread->pid);
mutex_lock(&p->mutex);
@@ -705,7 +705,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d", p->lead_thread->pid);
args->num_of_nodes = 0;
@@ -757,7 +757,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
int ret;
int i;
- dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
+ dev_dbg(kfd_device, "get apertures for process pid %d",
+ p->lead_thread->pid);
if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
@@ -3375,12 +3376,12 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("pasid 0x%x mapping mmio page\n"
+ pr_debug("process pid %d mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- process->pasid, (unsigned long long) vma->vm_start,
+ process->lead_thread->pid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
return io_remap_pfn_range(vma,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
index a8abc3091801..12456c61ffa5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c
@@ -204,11 +204,12 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
size_t exception_data_size)
{
struct kfd_process *p;
+ struct kfd_process_device *pdd = NULL;
bool signaled_to_debugger_or_runtime = false;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, &pdd);
- if (!p)
+ if (!pdd)
return false;
if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
@@ -238,9 +239,8 @@ bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
mutex_unlock(&p->mutex);
} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
- kfd_dqm_evict_pasid(dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(dev, p->pasid, NULL,
- exception_data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, exception_data);
signaled_to_debugger_or_runtime = true;
}
@@ -276,8 +276,8 @@ int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
data = (struct kfd_hsa_memory_exception_data *)
pdd->vm_fault_exc_data;
- kfd_dqm_evict_pasid(pdd->dev->dqm, p->pasid);
- kfd_signal_vm_fault_event(pdd->dev, p->pasid, NULL, data);
+ kfd_evict_process_device(pdd);
+ kfd_signal_vm_fault_event(pdd, NULL, data);
error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
}
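
The pattern repeated through these amdkfd hunks is that kfd_lookup_process_by_pasid() gained an out-parameter returning the kfd_process_device that owns the PASID, since PASIDs are now tracked per device rather than per process. Callers test the pdd instead of the process pointer, and must still drop the process reference the lookup took. A compile-only sketch of the new calling convention, with opaque types:

    struct kfd_process;
    struct kfd_process_device;

    struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid,
                                    struct kfd_process_device **pdd);
    void kfd_unref_process(struct kfd_process *p);

    static int act_on_pasid(unsigned int pasid)
    {
            struct kfd_process_device *pdd = NULL;
            struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);

            if (!pdd)
                    return -1;      /* no device owns this pasid */

            /* ... operate on pdd ... */

            kfd_unref_process(p);   /* lookup took a process reference */
            return 0;
    }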
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 6cefd338f23d..bf978b368f6a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1558,7 +1558,7 @@ bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entr
u32 cam_index;
if (entry->ih == &adev->irq.ih_soft || entry->ih == &adev->irq.ih1) {
- p = kfd_lookup_process_by_pasid(entry->pasid);
+ p = kfd_lookup_process_by_pasid(entry->pasid, NULL);
if (!p)
return true;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ad9cb50a9fa3..35ae3c55a97f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -208,7 +208,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
return -EIO;
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
- queue_input.process_id = qpd->pqm->process->pasid;
+ queue_input.process_id = pdd->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
queue_input.process_va_start = 0;
queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
@@ -527,6 +527,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct device *dev = dqm->dev->adev->dev;
int allocated_vmid = -1, i;
@@ -545,9 +546,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
pr_debug("vmid allocated: %d\n", allocated_vmid);
- dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+ dqm->vmid_pasid[allocated_vmid] = pdd->pasid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
+ set_pasid_vmid_mapping(dqm, pdd->pasid, allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -799,6 +800,11 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
return -EOPNOTSUPP;
}
+	/* look up the VMID for that process the safe way, via its PDD */
+ pdd = kfd_get_process_device_data(dev, p);
+ if (!pdd)
+ return -EFAULT;
+
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
* to check which VMID the current process is mapped to.
@@ -808,23 +814,19 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
(dev->adev, vmid, &queried_pasid);
- if (status && queried_pasid == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
- vmid, p->pasid);
+ if (status && queried_pasid == pdd->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and process pid %d\n",
+ vmid, p->lead_thread->pid);
break;
}
}
if (vmid > last_vmid_to_scan) {
- dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
+ dev_err(dev->adev->dev, "Didn't find vmid for process pid %d\n",
+ p->lead_thread->pid);
return -EFAULT;
}
- /* taking the VMID for that process on the safe way using PDD */
- pdd = kfd_get_process_device_data(dev, p);
- if (!pdd)
- return -EFAULT;
-
reg_gfx_index.bits.sh_broadcast_writes = 1;
reg_gfx_index.bits.se_broadcast_writes = 1;
reg_gfx_index.bits.instance_broadcast_writes = 1;
@@ -1060,8 +1062,8 @@ static int suspend_single_queue(struct device_queue_manager *dqm,
if (q->properties.is_suspended)
return 0;
- pr_debug("Suspending PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Suspending process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
@@ -1108,8 +1110,8 @@ static int resume_single_queue(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
- pr_debug("Restoring from suspend PASID %u queue [%i]\n",
- pdd->process->pasid,
+ pr_debug("Restoring from suspend process pid %d queue [%i]\n",
+ pdd->process->lead_thread->pid,
q->properties.queue_id);
q->properties.is_suspended = false;
@@ -1142,8 +1144,8 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
pdd->last_evict_timestamp = get_jiffies_64();
/* Mark all queues as evicted. Deactivate all active queues on
@@ -1200,8 +1202,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto out;
- pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Evicting process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@@ -1261,8 +1263,8 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
@@ -1345,8 +1347,8 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!pdd->drm_priv)
goto vm_not_acquired;
- pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
- pdd->process->pasid);
+ pr_debug_ratelimited("Restoring process pid %d queues\n",
+ pdd->process->lead_thread->pid);
/* Update PD Base in QPD */
qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
@@ -2137,8 +2139,8 @@ static void set_queue_as_reset(struct device_queue_manager *dqm, struct queue *q
{
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
- dev_err(dqm->dev->adev->dev, "queue id 0x%0x at pasid 0x%0x is reset\n",
- q->properties.queue_id, q->process->pasid);
+	dev_err(dqm->dev->adev->dev, "queue id 0x%0x at process pid %d is reset\n",
+ q->properties.queue_id, pdd->process->lead_thread->pid);
pdd->has_reset_queue = true;
if (q->properties.is_active) {
@@ -2491,14 +2493,6 @@ failed_try_destroy_debugged_queue:
return retval;
}
-/*
- * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
- * stay in user mode.
- */
-#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
-/* APE1 limit is inclusive and 64K aligned. */
-#define APE1_LIMIT_ALIGNMENT 0xFFFF
-
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -2513,34 +2507,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
dqm_lock(dqm);
- if (alternate_aperture_size == 0) {
- /* base > limit disables APE1 */
- qpd->sh_mem_ape1_base = 1;
- qpd->sh_mem_ape1_limit = 0;
- } else {
- /*
- * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
- * SH_MEM_APE1_BASE[31:0], 0x0000 }
- * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
- * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
- * Verify that the base and size parameters can be
- * represented in this format and convert them.
- * Additionally restrict APE1 to user-mode addresses.
- */
-
- uint64_t base = (uintptr_t)alternate_aperture_base;
- uint64_t limit = base + alternate_aperture_size - 1;
-
- if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
- (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
- retval = false;
- goto out;
- }
-
- qpd->sh_mem_ape1_base = base >> 16;
- qpd->sh_mem_ape1_limit = limit >> 16;
- }
-
retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
@@ -2549,6 +2515,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
alternate_aperture_base,
alternate_aperture_size);
+ if (retval)
+ goto out;
+
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -2974,20 +2943,19 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, &pdd);
struct device_queue_manager *dqm = knode->dqm;
struct device *dev = dqm->dev->adev->dev;
struct qcm_process_device *qpd;
struct queue *q = NULL;
int ret = 0;
- if (!p)
+ if (!pdd)
return -EINVAL;
dqm_lock(dqm);
- pdd = kfd_get_process_device_data(dqm->dev, p);
if (pdd) {
qpd = &pdd->qpd;
@@ -3020,6 +2988,7 @@ int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbel
out:
dqm_unlock(dqm);
+ kfd_unref_process(p);
return ret;
}
@@ -3061,24 +3030,21 @@ out:
return ret;
}
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
+int kfd_evict_process_device(struct kfd_process_device *pdd)
{
- struct kfd_process_device *pdd;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct device_queue_manager *dqm;
+ struct kfd_process *p;
int ret = 0;
- if (!p)
- return -EINVAL;
+ p = pdd->process;
+ dqm = pdd->dev->dqm;
+
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- pdd = kfd_get_process_device_data(dqm->dev, p);
- if (pdd) {
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
- }
- kfd_unref_process(p);
+ if (dqm->dev->kfd->shared_resources.enable_mes)
+ ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
+ else
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index d4d95c7f2e5d..32bedef912b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -27,6 +27,14 @@
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -84,6 +92,36 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
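+ /*
+ * For example (hypothetical values): base = 0x10000 and
+ * size = 0x20000 give limit = 0x2FFFF, which encodes as
+ * SH_MEM_APE1_BASE = 0x1 and SH_MEM_APE1_LIMIT = 0x2.
+ */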
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
@@ -97,37 +135,22 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
-
- return true;
-}
-
-static int update_qpd_cik(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
- DEFAULT_MTYPE(MTYPE_NONCACHED) |
- APE1_MTYPE(MTYPE_NONCACHED);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_cik(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
index 245a90dfc2f6..b5f5f141353b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c
@@ -31,10 +31,17 @@ static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v10;
asic_ops->update_qpd = update_qpd_v10;
asic_ops->init_sdma_vm = init_sdma_vm_v10;
asic_ops->mqd_manager_init = mqd_manager_init_v10;
@@ -49,27 +56,27 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v10(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v10(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
index 2e129da7acb4..f436878d0d62 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c
@@ -30,10 +30,17 @@ static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v11;
asic_ops->update_qpd = update_qpd_v11;
asic_ops->init_sdma_vm = init_sdma_vm_v11;
asic_ops->mqd_manager_init = mqd_manager_init_v11;
@@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v11(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v11(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
index 4f3295b29dfb..62ca1c8fcbaf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c
@@ -30,10 +30,17 @@ static int update_qpd_v12(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v12(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
void device_queue_manager_init_v12(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v12;
asic_ops->update_qpd = update_qpd_v12;
asic_ops->init_sdma_vm = init_sdma_vm_v12;
asic_ops->mqd_manager_init = mqd_manager_init_v12;
@@ -48,28 +55,28 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
-static int update_qpd_v12(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
+static bool set_cache_memory_policy_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
{
- struct kfd_process_device *pdd;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
- (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
+ qpd->sh_mem_config = (SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
+ (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
- qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ return true;
+}
+
+static int update_qpd_v12(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 67137e674f1d..d85eadaa1e11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -30,10 +30,17 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size);
void device_queue_manager_init_v9(
struct device_queue_manager_asic_ops *asic_ops)
{
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_v9;
asic_ops->update_qpd = update_qpd_v9;
asic_ops->init_sdma_vm = init_sdma_vm_v9;
asic_ops->mqd_manager_init = mqd_manager_init_v9;
@@ -48,10 +55,36 @@ static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
private_base;
}
+static bool set_cache_memory_policy_v9(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ enum cache_policy default_policy,
+ enum cache_policy alternate_policy,
+ void __user *alternate_aperture_base,
+ uint64_t alternate_aperture_size)
+{
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ if (dqm->dev->kfd->noretry)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+
+ if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
+ qpd->sh_mem_config |= (1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
+
+ qpd->sh_mem_ape1_limit = 0;
+ qpd->sh_mem_ape1_base = 0;
+ qpd->sh_mem_bases = compute_sh_mem_bases_64bit(qpd_to_pdd(qpd));
+
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
+ return true;
+}
+
static int update_qpd_v9(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
- struct kfd_process_device *pdd;
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
- pdd = qpd_to_pdd(qpd);
@@ -64,8 +97,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4) ||
- KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 5, 0))
+ KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 4))
qpd->sh_mem_config |=
(1 << SH_MEM_CONFIG__F8_MODE__SHIFT);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index b291ee0fab94..320518f41890 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -27,6 +27,14 @@
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
+/*
+ * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
+ * stay in user mode.
+ */
+#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
+/* APE1 limit is inclusive and 64K aligned. */
+#define APE1_LIMIT_ALIGNMENT 0xFFFF
+
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -85,6 +93,36 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
{
uint32_t default_mtype;
uint32_t ape1_mtype;
+ unsigned int temp;
+ bool retval = true;
+
+ if (alternate_aperture_size == 0) {
+ /* base > limit disables APE1 */
+ qpd->sh_mem_ape1_base = 1;
+ qpd->sh_mem_ape1_limit = 0;
+ } else {
+ /*
+ * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
+ * SH_MEM_APE1_BASE[31:0], 0x0000 }
+ * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
+ * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
+ * Verify that the base and size parameters can be
+ * represented in this format and convert them.
+ * Additionally restrict APE1 to user-mode addresses.
+ */
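+ /*
+ * Example (hypothetical values): base = 0x10000 with
+ * size = 0x20000 yields limit = 0x2FFFF, stored as
+ * SH_MEM_APE1_BASE = 0x1 and SH_MEM_APE1_LIMIT = 0x2.
+ */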
+
+ uint64_t base = (uintptr_t)alternate_aperture_base;
+ uint64_t limit = base + alternate_aperture_size - 1;
+
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
+ goto out;
+ }
+
+ qpd->sh_mem_ape1_base = base >> 16;
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_UC :
@@ -100,40 +138,21 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
- return true;
-}
-
-static int update_qpd_vi(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct kfd_process_device *pdd;
- unsigned int temp;
-
- pdd = qpd_to_pdd(qpd);
-
- /* check if sh_mem_config register already configured */
- if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
- SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
- MTYPE_UC <<
- SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
-
- qpd->sh_mem_ape1_limit = 0;
- qpd->sh_mem_ape1_base = 0;
- }
-
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
- temp = get_sh_mem_bases_nybble_64(pdd);
+ temp = get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd));
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
temp, qpd->sh_mem_bases);
+out:
+ return retval;
+}
+
+static int update_qpd_vi(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d075f24e5f9f..fecdb6794075 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -727,7 +727,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -1139,8 +1139,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGSEGV to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -1148,13 +1148,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to process %d (pasid 0x%x)",
- p->lead_thread->pid, p->pasid);
+ "Sending SIGTERM to process pid %d",
+ p->lead_thread->pid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "Process %d (pasid 0x%x) got unhandled exception",
- p->lead_thread->pid, p->pasid);
+ "Process pid %d got unhandled exception",
+ p->lead_thread->pid);
}
}
@@ -1168,7 +1168,7 @@ void kfd_signal_hw_exception_event(u32 pasid)
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return; /* Presumably process exited. */
@@ -1177,22 +1177,20 @@ void kfd_signal_hw_exception_event(u32 pasid)
kfd_unref_process(p);
}
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = pdd->process;
struct kfd_hsa_memory_exception_data memory_exception_data;
int user_gpu_id;
- if (!p)
- return; /* Presumably process exited. */
-
- user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+ user_gpu_id = kfd_process_get_user_gpu_id(p, pdd->dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
- WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+ WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n",
+ pdd->dev->id);
return;
}
@@ -1229,7 +1227,6 @@ void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
}
rcu_read_unlock();
- kfd_unref_process(p);
}
void kfd_signal_reset_event(struct kfd_node *dev)
@@ -1264,7 +1261,8 @@ void kfd_signal_reset_event(struct kfd_node *dev)
}
if (unlikely(!pdd)) {
- WARN_ONCE(1, "Could not get device data from pasid:0x%x\n", p->pasid);
+ WARN_ONCE(1, "Could not get device data from process pid:%d\n",
+ p->lead_thread->pid);
continue;
}
@@ -1273,8 +1271,15 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (dev->dqm->detect_hang_count) {
struct amdgpu_task_info *ti;
+ struct amdgpu_fpriv *drv_priv;
+
+ if (unlikely(amdgpu_file_to_fpriv(pdd->drm_file, &drv_priv))) {
+ WARN_ONCE(1, "Could not get vm for device %x from pid:%d\n",
+ dev->id, p->lead_thread->pid);
+ continue;
+ }
- ti = amdgpu_vm_get_task_info_pasid(dev->adev, p->pasid);
+ ti = amdgpu_vm_get_task_info_vm(&drv_priv->vm);
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
@@ -1311,7 +1316,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_event *ev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index b3f988b275a8..c5f97e6e36ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -194,7 +194,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
enum amdgpu_ras_block block = 0;
int ret = -EINVAL;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 0cb5c582ce7d..b8a91bf4ef30 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -146,7 +146,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
{
enum amdgpu_ras_block block = 0;
uint32_t reset = 0;
- struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid, NULL);
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
u64 event_id;
int old_poison, ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 1f9f5bfeaf86..d56525201155 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -47,7 +47,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
@@ -106,7 +106,7 @@ static int pm_map_process_aldebaran(struct packet_manager *pm,
packet->bitfields2.exec_cleaner_shader = 1;
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index c1199d06d131..347c86e1c378 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -42,6 +42,7 @@ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
+ struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct pm4_mes_map_process *packet;
packet = (struct pm4_mes_map_process *)buffer;
@@ -52,7 +53,7 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 10;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
+ packet->bitfields2.pasid = pdd->pasid;
packet->bitfields3.page_table_base = qpd->page_table_base;
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d8cd913aa772..0a99c5c9cadc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -851,6 +851,8 @@ struct kfd_process_device {
/* Tracks queue reset status */
bool has_reset_queue;
+
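+ /* Per-device PASID, taken over from the device's GPUVM
+ * (see kfd_process_device_init_vm).
+ */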
+ u32 pasid;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -910,8 +912,6 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- u32 pasid;
-
/*
* Array of kfd_process_device pointers,
* one for each device the process is using.
@@ -1039,7 +1039,8 @@ void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
@@ -1337,7 +1338,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
-int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
+int kfd_evict_process_device(struct kfd_process_device *pdd);
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);
/* Process Queue Manager */
@@ -1492,7 +1493,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
-void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
+void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index c3f2c0428e01..7c0c24732481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -283,8 +283,8 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
cu_cnt = 0;
proc = pdd->process;
if (pdd->qpd.queue_count == 0) {
- pr_debug("Gpu-Id: %d has no active queues for process %d\n",
- dev->id, proc->pasid);
+ pr_debug("Gpu-Id: %d has no active queues for process pid %d\n",
+ dev->id, (int)proc->lead_thread->pid);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
@@ -328,12 +328,9 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- if (strcmp(attr->name, "pasid") == 0) {
- struct kfd_process *p = container_of(attr, struct kfd_process,
- attr_pasid);
-
- return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
- } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ if (strcmp(attr->name, "pasid") == 0)
+ return snprintf(buffer, PAGE_SIZE, "%d\n", 0);
+ else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
@@ -842,6 +839,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
return ERR_PTR(-EINVAL);
}
+ /* If the process just called exec(3), it is possible that the
+ * cleanup of the kfd_process (following the release of the mm
+ * of the old process image) is still in the cleanup work queue.
+ * Make sure to drain any job before trying to recreate any
+ * resource for this process.
+ */
+ flush_workqueue(kfd_process_wq);
+
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -862,14 +867,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
if (process) {
pr_debug("Process already found\n");
} else {
- /* If the process just called exec(3), it is possible that the
- * cleanup of the kfd_process (following the release of the mm
- * of the old process image) is still in the cleanup work queue.
- * Make sure to drain any job before trying to recreate any
- * resource for this process.
- */
- flush_workqueue(kfd_process_wq);
-
process = create_process(thread);
if (IS_ERR(process))
goto out;
@@ -1057,17 +1054,13 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
- pdd->dev->id, p->pasid);
-
+ pr_debug("Releasing pdd (topology id %d, for pid %d)\n",
+ pdd->dev->id, p->lead_thread->pid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
kfd_process_device_destroy_ib_mem(pdd);
- if (pdd->drm_file) {
- amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->adev, pdd->drm_priv);
+ if (pdd->drm_file)
fput(pdd->drm_file);
- }
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -1191,7 +1184,6 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_event_free_process(p);
- kfd_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
@@ -1542,12 +1534,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
atomic_set(&process->debugged_process_count, 0);
sema_init(&process->runtime_enable_sema, 0);
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0) {
- err = -ENOSPC;
- goto err_alloc_pasid;
- }
-
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
@@ -1601,8 +1587,6 @@ err_init_svm_range_list:
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- kfd_pasid_free(process->pasid);
-err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
@@ -1721,15 +1705,19 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
- if (ret)
- goto err_set_pasid;
+ if (unlikely(!avm->pasid)) {
+ dev_warn(pdd->dev->adev->dev, "vm %p has no pasid associated\n",
+ avm);
+ ret = -EINVAL;
+ goto err_get_pasid;
+ }
+ pdd->pasid = avm->pasid;
pdd->drm_file = drm_file;
return 0;
-err_set_pasid:
+err_get_pasid:
kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
@@ -1815,25 +1803,50 @@ void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
idr_remove(&pdd->alloc_idr, handle);
}
-/* This increments the process->ref counter. */
-struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
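+/*
+ * Must be called with the kfd_processes_srcu read lock held; the
+ * returned pdd is only guaranteed to stay valid while that lock is
+ * held.
+ */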
+static struct kfd_process_device *kfd_lookup_process_device_by_pasid(u32 pasid)
{
- struct kfd_process *p, *ret_p = NULL;
+ struct kfd_process_device *ret_p = NULL;
+ struct kfd_process *p;
unsigned int temp;
-
- int idx = srcu_read_lock(&kfd_processes_srcu);
+ int i;
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- if (p->pasid == pasid) {
- kref_get(&p->ref);
- ret_p = p;
- break;
+ for (i = 0; i < p->n_pdds; i++) {
+ if (p->pdds[i]->pasid == pasid) {
+ ret_p = p->pdds[i];
+ break;
+ }
}
+ if (ret_p)
+ break;
+ }
+ return ret_p;
+}
+
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
+ struct kfd_process_device **pdd)
+{
+ struct kfd_process_device *ret_p;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ ret_p = kfd_lookup_process_device_by_pasid(pasid);
+ if (ret_p) {
+ if (pdd)
+ *pdd = ret_p;
+ kref_get(&ret_p->process->ref);
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+ return ret_p->process;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return ret_p;
+ if (pdd)
+ *pdd = NULL;
+
+ return NULL;
}
/* This increments the process->ref counter. */
@@ -1989,7 +2002,7 @@ static void evict_process_worker(struct work_struct *work)
*/
p = container_of(dwork, struct kfd_process, eviction_work);
- pr_debug("Started evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Started evicting process pid %d\n", p->lead_thread->pid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
/* If another thread already signaled the eviction fence,
@@ -2001,9 +2014,9 @@ static void evict_process_worker(struct work_struct *work)
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
- pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
+ pr_debug("Finished evicting process pid %d\n", p->lead_thread->pid);
} else
- pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to evict queues of process pid %d\n", p->lead_thread->pid);
}
static int restore_process_helper(struct kfd_process *p)
@@ -2020,9 +2033,11 @@ static int restore_process_helper(struct kfd_process *p)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Finished restoring process pid %d\n",
+ p->lead_thread->pid);
else
- pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
+ pr_err("Failed to restore queues of process pid %d\n",
+ p->lead_thread->pid);
return ret;
}
@@ -2039,7 +2054,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid 0x%x\n", p->pasid);
+ pr_debug("Started restoring process pasid %d\n", (int)p->lead_thread->pid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -2055,8 +2070,8 @@ static void restore_process_worker(struct work_struct *work)
ret = restore_process_helper(p);
if (ret) {
- pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
- p->pasid, PROCESS_BACK_OFF_TIME_MS);
+ pr_debug("Failed to restore BOs of process pid %d, retry after %d ms\n",
+ p->lead_thread->pid, PROCESS_BACK_OFF_TIME_MS);
if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
@@ -2072,7 +2087,7 @@ void kfd_suspend_all_processes(void)
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
- pr_err("Failed to suspend process 0x%x\n", p->pasid);
+ pr_err("Failed to suspend process pid %d\n", p->lead_thread->pid);
signal_eviction_fence(p);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
@@ -2086,8 +2101,8 @@ int kfd_resume_all_processes(void)
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (restore_process_helper(p)) {
- pr_err("Restore process %d failed during resume\n",
- p->pasid);
+ pr_err("Restore process pid %d failed during resume\n",
+ p->lead_thread->pid);
ret = -EFAULT;
}
}
@@ -2142,7 +2157,7 @@ int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
KFD_IRQ_FENCE_CLIENTID;
- irq_drain_fence[3] = pdd->process->pasid;
+ irq_drain_fence[3] = pdd->pasid;
/*
* For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
@@ -2173,7 +2188,7 @@ void kfd_process_close_interrupt_drain(unsigned int pasid)
{
struct kfd_process *p;
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p)
return;
@@ -2294,8 +2309,8 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID 0x%x:\n",
- p->lead_thread->tgid, p->pasid);
+ seq_printf(m, "Process %d PASID %d:\n",
+ p->lead_thread->tgid, p->lead_thread->pid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d79caa1a6867..662c595ce783 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -69,8 +69,8 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid 0x%x\n",
- pqm->process->pasid);
+ pr_info("Cannot open more queues for process with pid %d\n",
+ pqm->process->lead_thread->pid);
return -ENOMEM;
}
@@ -451,8 +451,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
- pqm->process->pasid, type, retval);
+ pr_err("process pid %d DQM create queue type %d failed. ret %d\n",
+ pqm->process->lead_thread->pid, type, retval);
goto err_create_queue;
}
@@ -546,7 +546,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
- pqm->process->pasid,
+ pdd->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME && retval != -EIO)
goto err_destroy_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index d1cf9dd35290..116116a9f578 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -563,7 +563,8 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
int r;
p = container_of(prange->svms, struct kfd_process, svms);
- pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
+ pr_debug("process pid: %d svms 0x%p [0x%lx 0x%lx]\n",
+ p->lead_thread->pid, prange->svms,
prange->start, prange->last);
if (svm_range_validate_svm_bo(node, prange))
@@ -2973,7 +2974,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
return -EFAULT;
}
- p = kfd_lookup_process_by_pasid(pasid);
+ p = kfd_lookup_process_by_pasid(pasid, NULL);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return 0;
@@ -3024,7 +3025,7 @@ retry_write_locked:
/* check if this page fault time stamp is before svms->checkpoint_ts */
if (svms->checkpoint_ts[gpuidx] != 0) {
- if (amdgpu_ih_ts_after(ts, svms->checkpoint_ts[gpuidx])) {
+ if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
r = -EAGAIN;
goto out_unlock_svms;
@@ -3239,7 +3240,8 @@ void svm_range_list_fini(struct kfd_process *p)
struct svm_range *prange;
struct svm_range *next;
- pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p\n", p->lead_thread->pid,
+ &p->svms);
cancel_delayed_work_sync(&p->svms.restore_work);
@@ -3262,7 +3264,8 @@ void svm_range_list_fini(struct kfd_process *p)
mutex_destroy(&p->svms.lock);
- pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
+ pr_debug("process pid %d svms 0x%p done\n",
+ p->lead_thread->pid, &p->svms);
}
int svm_range_list_init(struct kfd_process *p)
@@ -3625,8 +3628,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
bool flush_tlb;
int r, ret = 0;
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
- p->pasid, &p->svms, start, start + size - 1, size);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, size);
r = svm_range_check_attr(p, nattr, attrs);
if (r)
@@ -3734,8 +3737,8 @@ out_unlock_range:
out:
mutex_unlock(&process_info->lock);
- pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
- &p->svms, start, start + size - 1, r);
+ pr_debug("process pid %d svms 0x%p [0x%llx 0x%llx] done, r=%d\n",
+ p->lead_thread->pid, &p->svms, start, start + size - 1, r);
return ret ? ret : r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index ceb9fb475ef1..98317eda2cdb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1683,17 +1683,32 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
- unsigned int cu_sibling_map_mask;
+ unsigned int cu_sibling_map_mask = 0;
int first_active_cu;
int i, j, k, xcc, start, end;
int num_xcc = NUM_XCC(knode->xcc_mask);
struct kfd_cache_properties *pcache = NULL;
enum amdgpu_memory_partition mode;
struct amdgpu_device *adev = knode->adev;
+ bool found = false;
start = ffs(knode->xcc_mask) - 1;
end = start + num_xcc;
- cu_sibling_map_mask = cu_info->bitmap[start][0][0];
+
+ /* Find the bitmap of the first active cu in the first
+ * xcc. This is based on the assumption that every xcc
+ * must have at least one active cu.
+ */
+ for (i = 0; i < gfx_info->max_shader_engines && !found; i++) {
+ for (j = 0; j < gfx_info->max_sh_per_se && !found; j++) {
+ if (cu_info->bitmap[start][i % 4][j % 4]) {
+ cu_sibling_map_mask =
+ cu_info->bitmap[start][i % 4][j % 4];
+ found = true;
+ }
+ }
+ }
+
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
@@ -2000,15 +2015,12 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
- dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
+ if (!amdgpu_sriov_vf(dev->gpu->adev))
+ dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
- if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0))
- dev->node_props.capability |=
- HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED;
-
if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(12, 0, 0))
dev->node_props.capability |=
HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 80a3cbd2cbe5..4801dcde2cb3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -367,6 +367,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
+ if (new_state->stream->adjust.timing_adjust_pending)
+ return true;
if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
return true;
else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
@@ -1912,26 +1914,6 @@ static enum dmub_ips_disable_type dm_get_default_ips_mode(
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
case IP_VERSION(3, 5, 0):
- /*
- * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
- * cause a hard hang. A fix exists for newer PMFW.
- *
- * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
- * IPS state in all cases, except for s0ix and all displays off (DPMS),
- * where IPS2 is allowed.
- *
- * When checking pmfw version, use the major and minor only.
- */
- if ((adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- else if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(11, 5, 0))
- /*
- * Other ASICs with DCN35 that have residency issues with
- * IPS2 in idle.
- * We want them to use IPS2 only in display off cases.
- */
- ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
- break;
case IP_VERSION(3, 5, 1):
ret = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
break;
@@ -3293,16 +3275,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
for (k = 0; k < dc_state->stream_count; k++) {
bundle->stream_update.stream = dc_state->streams[k];
- for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
bundle->surface_updates[m].surface =
- dc_state->stream_status->plane_states[m];
+ dc_state->stream_status[k].plane_states[m];
bundle->surface_updates[m].surface->force_full_update =
true;
}
update_planes_and_stream_adapter(dm->dc,
UPDATE_TYPE_FULL,
- dc_state->stream_status->plane_count,
+ dc_state->stream_status[k].plane_count,
dc_state->streams[k],
&bundle->stream_update,
bundle->surface_updates);
@@ -3404,11 +3386,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
return 0;
}
-
- /* leave display off for S4 sequence */
- if (adev->in_s4)
- return 0;
-
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -5662,9 +5639,9 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
case DRM_COLOR_YCBCR_BT2020:
if (full_range)
- *color_space = COLOR_SPACE_2020_YCBCR;
+ *color_space = COLOR_SPACE_2020_YCBCR_FULL;
else
- return -EINVAL;
+ *color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
default:
@@ -6160,7 +6137,7 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
else
- color_space = COLOR_SPACE_2020_YCBCR;
+ color_space = COLOR_SPACE_2020_YCBCR_LIMITED;
break;
case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
default:
@@ -7505,12 +7482,12 @@ cleanup:
}
struct dc_stream_state *
-create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream)
{
- struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_device *adev = drm_to_adev(connector->dev);
struct dc_stream_state *stream;
const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
@@ -7521,8 +7498,12 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!dm_state)
return NULL;
- if (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector &&
+ (aconnector->dc_link->connector_signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ aconnector->dc_link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER))
bpc_limit = 8;
do {
@@ -7534,10 +7515,11 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
break;
}
- if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (!aconnector) /* writeback connector */
return stream;
- dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
@@ -7567,7 +7549,7 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
__func__, __LINE__);
aconnector->force_yuv420_output = true;
- stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ stream = create_validate_stream_for_sink(connector, drm_mode,
dm_state, old_stream);
aconnector->force_yuv420_output = false;
}
@@ -7582,6 +7564,9 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
struct dc_sink *dc_sink;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
+ /* we always have an amdgpu_dm_connector here since we got
+ * here via the amdgpu_dm_connector_helper_funcs
+ */
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
@@ -7606,7 +7591,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
drm_mode_set_crtcinfo(mode, 0);
- stream = create_validate_stream_for_sink(aconnector, mode,
+ stream = create_validate_stream_for_sink(connector, mode,
to_dm_connector_state(connector->state),
NULL);
if (stream) {
@@ -8386,7 +8371,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
- if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ if (!ddc_service->ddc_pin)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
@@ -10658,7 +10643,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_validate_stream_for_sink(aconnector,
+ new_stream = create_validate_stream_for_sink(connector,
&new_crtc_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
@@ -10901,6 +10886,9 @@ static bool should_reset_plane(struct drm_atomic_state *state,
state->allow_modeset)
return true;
+ if (amdgpu_in_reset(adev) && state->allow_modeset)
+ return true;
+
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
@@ -12618,7 +12606,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
- if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
@@ -12627,22 +12615,15 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
goto out;
}
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
+ if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
+ /* The reply is stored in the top nibble of the command. */
+ payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
- payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
- if (!payload->write && p_notify->aux_reply.length &&
- (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
-
- if (payload->length != p_notify->aux_reply.length) {
- DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
- p_notify->aux_reply.length,
- payload->address, payload->length);
- *operation_result = AUX_RET_ERROR_INVALID_REPLY;
- goto out;
- }
-
+ /*write req may receive a byte indicating partially written number as well*/
+ if (p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
- }
/* success */
ret = p_notify->aux_reply.length;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d2703ca7dff3..195fec9048df 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -989,7 +989,7 @@ int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int
struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
struct dc_stream_state *
- create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ create_validate_stream_for_sink(struct drm_connector *connector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
const struct dc_stream_state *old_stream);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 049046c60462..c7d13e743e6c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1169,7 +1169,7 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
case COLOR_SPACE_2020_RGB_FULLRANGE:
seq_puts(m, "BT2020_RGB");
break;
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
seq_puts(m, "BT2020_YCC");
break;
default:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c0dc23244049..10ba4d7bf632 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -172,7 +172,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -209,7 +212,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,8 +222,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
- mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector[conn_index] = aconnector;
+ guard(mutex)(&hdcp_w->mutex);
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -237,9 +238,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
-
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +250,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -256,11 +259,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ if (hdcp_w->aconnector[conn_index]) {
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = NULL;
+ }
}
process_output(hdcp_w);
-
- mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +282,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +290,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +326,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +358,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
- mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +370,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +410,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +420,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +429,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +437,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
- mutex_lock(&hdcp_work->mutex);
+ guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
-
- mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -479,7 +475,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
- mutex_lock(&psp->dtm_context.mutex);
+ guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -494,8 +490,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
res = false;
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return res;
}
@@ -504,6 +498,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
+ unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -557,13 +552,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
- mutex_lock(&hdcp_w->mutex);
+ guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
-
+ drm_connector_get(&aconnector->base);
+ if (hdcp_w->aconnector[conn_index])
+ drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
+ hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
- mutex_unlock(&hdcp_w->mutex);
-
}
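The get/put pairing added to update_config() and hdcp_reset_display() pins each cached connector for as long as the workqueue holds a pointer to it. Reduced to its essentials, using the real drm_connector_get()/drm_connector_put() helpers around a hypothetical cache slot:

    #include <drm/drm_connector.h>

    /* Pin the new connector before caching it; drop any previously cached
     * reference so the slot never leaks or double-puts. */
    static void cache_connector(struct drm_connector **slot,
                                struct drm_connector *new)
    {
        drm_connector_get(new);
        if (*slot)
            drm_connector_put(*slot);
        *slot = new;
    }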
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fbd80d8545a8..a2532907c7be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -912,7 +912,7 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
{
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
- unsigned char start = block * EDID_LENGTH;
+ unsigned short start = block * EDID_LENGTH;
struct edid *edid;
int r;
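The widening above matters because EDID_LENGTH is 128: with an 8-bit start, block 2 computes 2 * 128 = 256, which truncates to 0 and re-reads the base block instead of the extension. A standalone illustration (plain C, outside the driver):

    #include <stdio.h>

    #define EDID_LENGTH 128

    int main(void)
    {
        unsigned char narrow = 2 * EDID_LENGTH;  /* 256 wraps to 0 */
        unsigned short wide  = 2 * EDID_LENGTH;  /* 256 fits */

        printf("narrow=%u wide=%u\n", (unsigned)narrow, (unsigned)wide);
        /* prints: narrow=0 wide=256 */
        return 0;
    }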
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 07e744da7bf4..c3759a1c32ce 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -51,6 +51,9 @@
#define PEAK_FACTOR_X1000 1006
+/*
+ * This function handles both native AUX and I2C-Over-AUX transactions.
+ */
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -59,6 +62,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum aux_return_code_type operation_result;
struct amdgpu_device *adev;
struct ddc_service *ddc;
+ uint8_t copy[16];
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -74,6 +78,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
+ if (payload.write) {
+ memcpy(copy, msg->buffer, msg->size);
+ payload.data = copy;
+ }
+
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
&operation_result);
@@ -87,15 +96,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
- result = 0;
+ result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
- if (payload.write && result >= 0)
- result = msg->size;
+ /*
+ * A result of 0 also covers the AUX_DEFER/I2C_DEFER cases
+ */
+ if (payload.write && result >= 0) {
+ if (result) {
+ /* one byte indicating the number of partially written bytes */
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n");
+ result = payload.data[0];
+ } else if (!payload.reply[0])
+ /* I2C_ACK | AUX_ACK */
+ result = msg->size;
+ }
- if (result < 0)
+ if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -114,6 +133,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
+ }
+
+ if (payload.reply[0])
+ drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
+ payload.reply[0]);
+
return result;
}
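The 16-byte stack copy keeps the caller's buffer intact when the hardware reuses payload.data to return the partial-write count. The shape of the fix, reduced to a sketch (hypothetical device type and transfer helper):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define AUX_MAX_PAYLOAD 16

    struct aux_dev;                                  /* hypothetical handle */
    int hw_aux_xfer(struct aux_dev *dev, u8 *buf, size_t len);  /* hypothetical */

    static int aux_write(struct aux_dev *dev, const u8 *buf, size_t len)
    {
        u8 copy[AUX_MAX_PAYLOAD];

        if (len > AUX_MAX_PAYLOAD)
            return -E2BIG;

        memcpy(copy, buf, len);    /* hardware may scribble on `copy` */
        return hw_aux_xfer(dev, copy, len);
    }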
@@ -1625,7 +1651,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (ind >= 0) {
struct drm_connector *connector;
- struct amdgpu_dm_connector *aconnector;
struct drm_connector_state *drm_new_conn_state;
struct dm_connector_state *dm_new_conn_state;
struct dm_crtc_state *dm_old_crtc_state;
@@ -1633,15 +1658,14 @@ int pre_validate_dsc(struct drm_atomic_state *state,
connector =
amdgpu_dm_find_first_crtc_matching_connector(state,
state->crtcs[ind].ptr);
- aconnector = to_amdgpu_dm_connector(connector);
drm_new_conn_state =
drm_atomic_get_new_connector_state(state,
- &aconnector->base);
+ connector);
dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);
local_dc_state->streams[i] =
- create_validate_stream_for_sink(aconnector,
+ create_validate_stream_for_sink(connector,
&state->crtcs[ind].new_state->mode,
dm_new_conn_state,
dm_old_crtc_state->stream);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
index b2fc4f8e6482..a51c2701da24 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dc_common.c
@@ -40,7 +40,8 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
return false;
default:
/* Add a case to switch */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 7d18f372ce7a..6bc59b7ef007 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -101,7 +101,6 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
- dm_output_to_console("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = encoder_control_fallback;
break;
}
@@ -238,7 +237,6 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_7;
break;
default:
- dm_output_to_console("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = transmitter_control_fallback;
break;
}
@@ -408,8 +406,6 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
- dm_output_to_console("Don't have set_pixel_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback;
break;
}
@@ -554,7 +550,6 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
- dm_output_to_console("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -671,8 +666,6 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
- dm_output_to_console("Don't have enable_crtc for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -864,8 +857,6 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
- dm_output_to_console("Don't have set_dce_clock for v%d\n",
- BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 73458e295103..df8139bda142 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -87,8 +87,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
default:
- /* Unsupported DCE */
- BREAK_TO_DEBUGGER();
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
return false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index a0fb4481d2f1..e4d22f74f986 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -130,7 +130,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -194,8 +194,6 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
- if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
- new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -204,15 +202,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- /* No need to apply the w/a if we haven't taken over from bios yet */
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, true);
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
+
+ /* Clamp the requested clock to the minimum limit before sending it to PMFW. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, context, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
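dcn315, dcn316, and dcn35 now share the same shape: gate the update on a non-zero request (or a lowering with zero displays), clamp the value sent to the SMU to the debug floor, and keep the unclamped request in the software state. A condensed sketch with assumed names:

    struct clk_state { int dispclk_khz; };    /* hypothetical */
    void smu_set_dispclk(int khz);            /* hypothetical */

    /* Firmware sees the clamped value; software tracks the raw request. */
    static void set_dispclk_clamped(struct clk_state *clks,
                                    int requested_khz, int min_khz)
    {
        int fw_khz = requested_khz;

        if (min_khz > 0 && fw_khz < min_khz)
            fw_khz = min_khz;

        smu_set_dispclk(fw_khz);
        clks->dispclk_khz = requested_khz;
    }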
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index c3e50c3aaa60..49efea0c8fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -140,7 +140,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -201,8 +201,6 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (new_clocks->dppclk_khz < 100000)
new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
@@ -211,11 +209,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Clamp the requested clock to the minimum limit before sending it to PMFW. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 1648226586e2..1f47931c2daf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -467,14 +467,19 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
- if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
- new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+ /* Clamp the requested clock to the minimum limit before sending it to PMFW. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 8082bb877611..a3b8e3d4a429 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -24,6 +24,8 @@
#include "dml/dcn401/dcn401_fpu.h"
+#define DCN_BASE__INST0_SEG1 0x000000C0
+
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37
#define mmCLK01_CLK0_CLK0_DFS_CNTL 0x16E69
#define mmCLK01_CLK0_CLK1_DFS_CNTL 0x16E6C
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4683c7ef4507..0ce0ad7f9839 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -276,6 +276,7 @@ static bool create_links(
link->link_id.type = OBJECT_TYPE_CONNECTOR;
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -438,9 +439,12 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* Don't adjust DRR while there's bandwidth optimizations pending to
* avoid conflicting with firmware updates.
*/
- if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->ctx->dce_version > DCE_VERSION_MAX) {
+ if (dc->optimized_required || dc->wm_optimized_required) {
+ stream->adjust.timing_adjust_pending = true;
return false;
+ }
+ }
dc_exit_ips_for_hw_access(dc);
@@ -452,6 +456,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (dc->caps.max_v_total != 0 &&
(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ stream->adjust.timing_adjust_pending = false;
if (adjust->allow_otg_v_count_halt)
return set_long_vtotal(dc, stream, adjust);
else
@@ -465,7 +470,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
dc->hwss.set_drr(&pipe,
1,
*adjust);
-
+ stream->adjust.timing_adjust_pending = false;
return true;
}
}
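The timing_adjust_pending flag added here records a DRR request that had to be deferred while bandwidth optimizations were in flight, and is cleared once set_drr actually reaches the hardware. The lifecycle, sketched with hypothetical helpers:

    /* Sketch of the deferred-adjust lifecycle. */
    if (optimizations_pending(dc)) {
        stream->adjust.timing_adjust_pending = true;    /* retry later */
        return false;
    }

    program_drr(pipe, adjust);                          /* hw programmed */
    stream->adjust.timing_adjust_pending = false;       /* request done */
    return true;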
@@ -3127,8 +3132,14 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
+ if (update->crtc_timing_adjust) {
+ if (stream->adjust.v_total_min != update->crtc_timing_adjust->v_total_min ||
+ stream->adjust.v_total_max != update->crtc_timing_adjust->v_total_max ||
+ stream->adjust.timing_adjust_pending)
+ update->crtc_timing_adjust->timing_adjust_pending = true;
stream->adjust = *update->crtc_timing_adjust;
+ update->crtc_timing_adjust->timing_adjust_pending = false;
+ }
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -4902,7 +4913,8 @@ static bool full_update_required(struct dc *dc,
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
stream_update->crtc_timing_adjust ||
- stream_update->scaler_sharpener_update))
+ stream_update->scaler_sharpener_update ||
+ stream_update->hw_cursor_req))
return true;
if (stream) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 6eb9bae3af91..4f54e75a8f95 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -176,7 +176,7 @@ static bool is_ycbcr2020_type(
{
bool ret = false;
- if (color_space == COLOR_SPACE_2020_YCBCR)
+ if (color_space == COLOR_SPACE_2020_YCBCR_LIMITED || color_space == COLOR_SPACE_2020_YCBCR_FULL)
ret = true;
return ret;
}
@@ -247,7 +247,8 @@ void color_space_to_black_color(
case COLOR_SPACE_YCBCR709_BLACK:
case COLOR_SPACE_YCBCR601_LIMITED:
case COLOR_SPACE_YCBCR709_LIMITED:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
+ case COLOR_SPACE_2020_YCBCR_FULL:
*black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
break;
@@ -563,6 +564,7 @@ void set_p_state_switch_method(
if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba)
return;
+ pipe_ctx->p_state_type = P_STATE_UNKNOWN;
if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
dm_dram_clock_change_unsupported) {
/* MCLK switching is supported */
@@ -609,6 +611,21 @@ void set_p_state_switch_method(
}
}
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params)
+{
+ /* params can be NULL. */
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->set_drr)
+ pipe_ctx->stream_res.tg->funcs->set_drr(
+ pipe_ctx->stream_res.tg, params);
+
+ if (stream)
+ stream->adjust.timing_adjust_pending = false;
+}
+
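A call site for the helper above pairs the timing-generator programming with the flag clear in one step (sketch, assuming the driver's drr_params vertical-total fields and caller-provided bounds):

    struct drr_params params = {
        .vertical_total_min = v_min,    /* assumed caller-provided bounds */
        .vertical_total_max = v_max,
    };

    set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, &params);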
void get_fams2_visual_confirm_color(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 298668e9729c..375b3b1d1d18 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -941,6 +941,17 @@ static void calculate_adjust_recout_for_visual_confirm(struct pipe_ctx *pipe_ctx
*base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
}
+static void reverse_adjust_recout_for_visual_confirm(struct rect *recout,
+ struct pipe_ctx *pipe_ctx)
+{
+ int dpp_offset, base_offset;
+
+ calculate_adjust_recout_for_visual_confirm(pipe_ctx, &base_offset,
+ &dpp_offset);
+ recout->height += base_offset;
+ recout->height += dpp_offset;
+}
+
static void adjust_recout_for_visual_confirm(struct rect *recout,
struct pipe_ctx *pipe_ctx)
{
@@ -1642,6 +1653,62 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
return res;
}
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *test_pipe, *split_pipe;
+ struct rect r1 = pipe_ctx->plane_res.scl_data.recout;
+ int r1_right, r1_bottom;
+ int cur_layer = pipe_ctx->plane_state->layer_index;
+
+ reverse_adjust_recout_for_visual_confirm(&r1, pipe_ctx);
+ r1_right = r1.x + r1.width;
+ r1_bottom = r1.y + r1.height;
+
+ /**
+ * Disable the cursor if there's another pipe above this with a
+ * plane that contains this pipe's viewport to prevent double cursor
+ * and incorrect scaling artifacts.
+ */
+ for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ test_pipe = test_pipe->top_pipe) {
+ struct rect r2;
+ int r2_right, r2_bottom;
+ // Skip invisible layer and pipe-split plane on same layer
+ if (!test_pipe->plane_state ||
+ !test_pipe->plane_state->visible ||
+ test_pipe->plane_state->layer_index == cur_layer)
+ continue;
+
+ r2 = test_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2, test_pipe);
+ r2_right = r2.x + r2.width;
+ r2_bottom = r2.y + r2.height;
+
+ /**
+ * There may be another half plane on the same layer because of
+ * pipe-split; merge them together at the same height.
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ struct rect r2_half;
+
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ reverse_adjust_recout_for_visual_confirm(&r2_half, split_pipe);
+ r2.x = min(r2_half.x, r2.x);
+ r2.width = r2.width + r2_half.width;
+ r2_right = r2.x + r2.width;
+ r2_bottom = min(r2_bottom, r2_half.y + r2_half.height);
+ break;
+ }
+
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_right <= r2_right && r1_bottom <= r2_bottom)
+ return true;
+ }
+
+ return false;
+}
+
enum dc_status resource_build_scaling_params_for_context(
const struct dc *dc,
@@ -4247,7 +4314,7 @@ static void set_avi_info_frame(
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
break;
@@ -4261,7 +4328,7 @@ static void set_avi_info_frame(
break;
}
- if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
+ if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED &&
stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
hdmi_info.bits.EC0_EC2 = 0;
hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 44ff9abe2880..87b4c2793df3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -991,57 +991,11 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" is_cw6_en : %d", diag_data.is_cw6_enabled);
}
-static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state != NULL) {
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dc_can_pipe_disable_cursor(pipe_ctx))
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ resource_can_pipe_disable_cursor(pipe_ctx))
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index cc005da75ce4..8bb628ab7855 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -959,6 +959,14 @@ union dp_128b_132b_supported_lttpr_link_rates {
uint8_t raw;
};
+union dp_alpm_lttpr_cap {
+ struct {
+ uint8_t AUX_LESS_ALPM_SUPPORTED :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
union dp_sink_video_fallback_formats {
struct {
uint8_t dp_1024x768_60Hz_24bpp_support :1;
@@ -1118,6 +1126,7 @@ struct dc_lttpr_caps {
uint8_t max_ext_timeout;
union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+ union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
uint8_t lttpr_ieee_oui[3];
uint8_t lttpr_device_id[6];
@@ -1372,6 +1381,9 @@ struct dp_trace {
#ifndef DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP
#define DPCD_MAX_UNCOMPRESSED_PIXEL_RATE_CAP 0x221c
#endif
+#ifndef DP_LTTPR_ALPM_CAPABILITIES
+#define DP_LTTPR_ALPM_CAPABILITIES 0xF0009
+#endif
#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 5ac55601a6da..d562ddeca512 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -653,7 +653,8 @@ enum dc_color_space {
COLOR_SPACE_YCBCR709_LIMITED,
COLOR_SPACE_2020_RGB_FULLRANGE,
COLOR_SPACE_2020_RGB_LIMITEDRANGE,
- COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_2020_YCBCR_LIMITED,
+ COLOR_SPACE_2020_YCBCR_FULL,
COLOR_SPACE_ADOBERGB,
COLOR_SPACE_DCIP3,
COLOR_SPACE_DISPLAYNATIVE,
@@ -661,6 +662,7 @@ enum dc_color_space {
COLOR_SPACE_APPCTRL,
COLOR_SPACE_CUSTOMPOINTS,
COLOR_SPACE_YCBCR709_BLACK,
+ COLOR_SPACE_2020_YCBCR = COLOR_SPACE_2020_YCBCR_LIMITED,
};
enum dc_dither_option {
@@ -1015,6 +1017,7 @@ struct dc_crtc_timing_adjust {
uint32_t v_total_mid;
uint32_t v_total_mid_frame_num;
uint32_t allow_otg_v_count_halt;
+ uint8_t timing_adjust_pending;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0c2aa91f0a11..e60898c2df01 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1033,6 +1033,13 @@ struct psr_settings {
unsigned int psr_sdp_transmit_line_num_deadline;
uint8_t force_ffu_mode;
unsigned int psr_power_opt;
+
+ /**
+ * Some panels cannot handle the idle pattern during PSR entry,
+ * so power down the PHY before disabling the stream to avoid
+ * sending the idle pattern.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
enum replay_coasting_vtotal_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index d199e4ed2e59..1130d7619b26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -418,7 +418,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
dynamic_range_rgb = 1; /*limited range*/
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -430,6 +430,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_APPCTRL:
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 88c75c243bf8..ff3b8244ba3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -418,6 +418,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
+
+ copy_settings_data->power_down_phy_before_disable_stream =
+ link->psr_settings.power_down_phy_before_disable_stream;
+
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index d01a8b8f9595..22e66b375a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -391,7 +391,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -404,6 +404,7 @@ void enc1_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index 098c2a01a850..9e5072627ec7 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -632,7 +632,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -645,6 +645,7 @@ void enc401_stream_encoder_dp_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 47d785204f29..c90dee4e9116 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 0,
+ .do_urgent_latency_adjustment = 1,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
@@ -367,6 +367,8 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index d9e63c4fdd95..17d0b4923b0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -401,6 +401,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dram_speed_mts = clock_limits[i].dram_speed_mts;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
clock_limits[i].dtbclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 0c8ec30ea672..731fbd4bc600 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -910,7 +910,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
}
//TODO : Could be possibly moved to a common helper layer.
-static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane, unsigned int *plane_id)
+static bool dml21_wrapper_get_plane_id(const struct dc_state *context, unsigned int stream_id, const struct dc_plane_state *plane, unsigned int *plane_id)
{
int i, j;
@@ -918,10 +918,12 @@ static bool dml21_wrapper_get_plane_id(const struct dc_state *context, const str
return false;
for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++) {
- if (context->stream_status[i].plane_states[j] == plane) {
- *plane_id = (i << 16) | j;
- return true;
+ if (context->streams[i]->stream_id == stream_id) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ if (context->stream_status[i].plane_states[j] == plane) {
+ *plane_id = (i << 16) | j;
+ return true;
+ }
}
}
}
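Threading stream_id through the lookup keeps a plane pointer that appears under more than one stream from matching the wrong one; the returned id packs the stream slot in the high 16 bits and the plane slot in the low 16. The encoding, isolated as a sketch:

    /* Sketch: plane id = stream slot above plane slot. */
    static inline unsigned int make_plane_id(unsigned int stream_idx,
                                             unsigned int plane_idx)
    {
        return (stream_idx << 16) | plane_idx;
    }

    static inline unsigned int plane_id_stream_idx(unsigned int id)
    {
        return id >> 16;
    }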
@@ -944,14 +946,14 @@ static unsigned int map_stream_to_dml21_display_cfg(const struct dml2_context *d
return location;
}
-static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx,
+static unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id,
const struct dc_plane_state *plane, const struct dc_state *context)
{
unsigned int plane_id;
int i = 0;
int location = -1;
- if (!dml21_wrapper_get_plane_id(context, plane, &plane_id)) {
+ if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) {
ASSERT(false);
return -1;
}
@@ -1037,7 +1039,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
} else {
for (plane_index = 0; plane_index < context->stream_status[stream_index].plane_count; plane_index++) {
- disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->stream_status[stream_index].plane_states[plane_index], context);
+ disp_cfg_plane_location = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], context);
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
@@ -1048,7 +1050,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
dml_dispcfg->plane_descriptors[disp_cfg_plane_location].stream_index = disp_cfg_stream_location;
- if (dml21_wrapper_get_plane_id(context, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
+ if (dml21_wrapper_get_plane_id(context, context->streams[stream_index]->stream_id, context->stream_status[stream_index].plane_states[plane_index], &dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
dml_ctx->v21.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
/* apply forced pstate policy */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
index 1e56d995cd0e..930e86cdb88a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
@@ -232,7 +232,6 @@ void dml21_program_dc_pipe(struct dml2_context *dml_ctx, struct dc_state *contex
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipe_ctx->plane_res.bw.dppclk_khz;
dml21_populate_mall_allocation_size(context, dml_ctx, pln_prog, pipe_ctx);
- memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[pipe_ctx->pipe_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
bool sub_vp_enabled = is_sub_vp_enabled(pipe_ctx->stream->ctx->dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index d6fd13f43c08..ed6584535e89 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -129,6 +129,7 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
int num_pipes;
+ unsigned int dml_phantom_prog_idx;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
@@ -142,6 +143,9 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+ /* phantoms start after the main planes */
+ dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes;
+
for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) {
pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx];
@@ -167,6 +171,16 @@ static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_sta
dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog);
}
}
+
+ /* copy per plane mcache allocation */
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation));
+ if (pln_prog->phantom_plane.valid) {
+ memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx],
+ &pln_prog->phantom_plane.mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+
+ dml_phantom_prog_idx++;
+ }
}
/* assign global clocks */
@@ -220,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
if (!result)
return false;
+ DC_FP_START();
result = dml2_build_mode_programming(mode_programming);
+ DC_FP_END();
if (!result)
return false;
@@ -263,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
mode_support->dml2_instance = dml_init->dml2_instance;
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
+ DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
+ DC_FP_END();
if (!is_supported)
return false;
@@ -274,16 +292,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml
{
bool out = false;
- DC_FP_START();
-
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
out = dml21_check_mode_support(in_dc, context, dml_ctx);
else
out = dml21_mode_check_and_programming(in_dc, context, dml_ctx);
- DC_FP_END();
-
return out;
}
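Dropping the DC_FP_START/END pair from dml21_validate() and re-adding it around the two DML calls narrows the kernel-FPU window to the code that actually does floating-point math. The intended shape, as a sketch with hypothetical helpers:

    /* struct ctx, prepare_integer_state(), and run_fp_calculations() are
     * hypothetical; only the DC_FP_START()/DC_FP_END() bracketing is real. */
    static bool validate(struct ctx *ctx)
    {
        bool ok;

        prepare_integer_state(ctx);     /* no FP work: outside the window */

        DC_FP_START();
        ok = run_fp_calculations(ctx);  /* FP math only inside the window */
        DC_FP_END();

        return ok;
    }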
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index d2d053f2354d..0ab19cf4d242 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -245,6 +245,7 @@ struct dml2_per_plane_programming {
struct {
bool valid;
struct dml2_plane_parameters descriptor;
+ struct dml2_mcache_surface_allocation mcache_allocation;
struct dml2_dchub_per_pipe_register_set *pipe_regs[DML2_MAX_PLANES];
} phantom_plane;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
index 7216d25c783e..44d2969a904e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -253,7 +253,8 @@ static void expand_implict_subvp(const struct display_configuation_with_meta *di
static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_instance *core, const struct display_configuation_with_meta *display_cfg,
const struct dml2_display_cfg *svp_expanded_display_cfg, struct dml2_display_cfg_programming *programming, struct dml2_core_scratch *scratch)
{
- unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index;
+ unsigned int stream_index, plane_index, pipe_offset, stream_already_populated_mask, main_plane_index, mcache_index;
+ unsigned int total_main_mcaches_required = 0;
int total_pipe_regs_copied = 0;
int dml_internal_pipe_index = 0;
const struct dml2_plane_parameters *main_plane;
@@ -324,6 +325,13 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&programming->plane_programming[plane_index].mcache_allocation,
+ &display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+ total_main_mcaches_required += programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane0 +
+ programming->plane_programming[plane_index].mcache_allocation.num_mcaches_plane1 -
+ (programming->plane_programming[plane_index].mcache_allocation.last_slice_sharing.plane0_plane1 ? 1 : 0);
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[plane_index].pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -362,6 +370,22 @@ static void pack_mode_programming_params_with_implicit_subvp(struct dml2_core_in
memcpy(&programming->plane_programming[main_plane_index].phantom_plane.descriptor, phantom_plane, sizeof(struct dml2_plane_parameters));
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &programming->plane_programming[main_plane_index].svp_size_mall_bytes, dml_internal_pipe_index);
+
+ /* Generate the mcache allocation: phantoms use an identical mcache configuration, but in the MALL set and with unique mcache IDs beginning after all main IDs. */
+ memcpy(&programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation,
+ &programming->plane_programming[main_plane_index].mcache_allocation,
+ sizeof(struct dml2_mcache_surface_allocation));
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane0; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane0[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane0[mcache_index];
+ }
+ for (mcache_index = 0; mcache_index < programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.num_mcaches_plane1; mcache_index++) {
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index] += total_main_mcaches_required;
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_mall_plane1[mcache_index] =
+ programming->plane_programming[main_plane_index].phantom_plane.mcache_allocation.global_mcache_ids_plane1[mcache_index];
+ }
+
for (pipe_offset = 0; pipe_offset < programming->plane_programming[main_plane_index].num_dpps_required; pipe_offset++) {
// Assign storage for this pipe's register values
programming->plane_programming[main_plane_index].phantom_plane.pipe_regs[pipe_offset] = &programming->pipe_regs[total_pipe_regs_copied];
@@ -571,6 +595,10 @@ bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out
dml2_core_calcs_get_mall_allocation(&core->clean_me_up.mode_lib, &in_out->programming->plane_programming[plane_index].surface_size_mall_bytes, dml_internal_pipe_index);
+ memcpy(&in_out->programming->plane_programming[plane_index].mcache_allocation,
+ &in_out->display_cfg->stage2.mcache_allocations[plane_index],
+ sizeof(struct dml2_mcache_surface_allocation));
+
for (pipe_offset = 0; pipe_offset < in_out->programming->plane_programming[plane_index].num_dpps_required; pipe_offset++) {
in_out->programming->plane_programming[plane_index].plane_descriptor = &in_out->programming->display_config.plane_descriptors[plane_index];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index c1ff869512f2..a72b4c05e1fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -2638,6 +2638,9 @@ static void calculate_mcache_setting(
// Luma/Chroma combine in the last mcache
// In the case of Luma/Chroma combine-mCache (with lc_comb_mcache==1), all mCaches except the last segment are filled as much as possible, when stay aligned to mvmpg boundary
if (*p->lc_comb_mcache && l->is_dual_plane) {
+ /* if luma and chroma planes share an mcache, increase total chroma mcache count */
+ *p->num_mcaches_c = *p->num_mcaches_c + 1;
+
for (n = 0; n < *p->num_mcaches_l - 1; n++)
p->mcache_offsets_l[n] = (n + 1) * l->mvmpg_per_mcache_lb_l * l->mvmpg_access_width_l;
p->mcache_offsets_l[*p->num_mcaches_l - 1] = l->full_vp_access_width_l;
@@ -3710,13 +3713,12 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = (double)timing->h_total / ((double)timing->pixel_clock_khz / 1000);
unsigned int vblank_actual = timing->v_total - timing->v_active;
unsigned int vblank_nom_default_in_line = (unsigned int)math_floor2((double)vblank_nom_default_us / line_time_us, 1.0);
- unsigned int vblank_nom_input = (unsigned int)math_min2(timing->vblank_nom, vblank_nom_default_in_line);
- unsigned int vblank_avail = (vblank_nom_input == 0) ? vblank_nom_default_in_line : vblank_nom_input;
+ unsigned int vblank_avail = (timing->vblank_nom == 0) ? vblank_nom_default_in_line : (unsigned int)timing->vblank_nom;
vblank_size = (unsigned int)math_min2(vblank_actual, vblank_avail);
if (timing->interlaced && !ptoi_supported)
- max_vstartup_lines = (unsigned int)(math_floor2(vblank_size / 2.0, 1.0));
+ max_vstartup_lines = (unsigned int)(math_floor2((vblank_size - 1) / 2.0, 1.0));
else
max_vstartup_lines = vblank_size - (unsigned int)math_max2(1.0, math_ceil2(write_back_delay_us / line_time_us, 1.0));
#ifdef __DML_VBA_DEBUG__
@@ -4912,6 +4914,7 @@ static double get_urgent_bandwidth_required(
double ReadBandwidthChroma[],
double PrefetchBandwidthLuma[],
double PrefetchBandwidthChroma[],
+ double PrefetchBandwidthOto[],
double excess_vactive_fill_bw_l[],
double excess_vactive_fill_bw_c[],
double cursor_bw[],
@@ -4975,8 +4978,9 @@ static double get_urgent_bandwidth_required(
l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k];
l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur;
l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
+ l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre;
l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k];
- surface_required_bw[k] = math_max4(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw);
+ surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto);
/* export peak required bandwidth for the surface */
surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]);
@@ -5174,6 +5178,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Tsw_est3 = 0.0;
s->cursor_prefetch_bytes = 0;
*p->prefetch_cursor_bw = 0;
+ *p->RequiredPrefetchBWOTO = 0.0;
dcc_mrq_enable = (p->dcc_enable && p->mrq_present);
@@ -5387,6 +5392,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->prefetch_bw_oto += (p->swath_width_chroma_ub * p->myPipe->BytePerPixelC) / s->LineTime;
}
+ /* oto prefetch bw should always be less than the total vactive bw */
+ DML2_ASSERT(s->prefetch_bw_oto < s->per_pipe_vactive_sw_bw * p->myPipe->DPPPerSurface);
+
s->prefetch_bw_oto = math_max2(s->per_pipe_vactive_sw_bw, s->prefetch_bw_oto) * p->mall_prefetch_sdp_overhead_factor;
s->prefetch_bw_oto = math_min2(s->prefetch_bw_oto, *p->prefetch_sw_bytes/(s->min_Lsw_oto*s->LineTime));
@@ -5397,6 +5405,12 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
p->vm_bytes * p->HostVMInefficiencyFactor / (31 * s->LineTime) - *p->Tno_bw,
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / (15 * s->LineTime));
+ /* oto bw needs to be output even if the oto schedule isn't being used, to avoid an ms/mp mismatch:
+ * mp will fail if ms decides to use the equ schedule while mp decides to use the oto schedule,
+ * and the required bandwidth increases when going from ms to mp.
+ */
+ *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto;
+
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l);
dml2_printf("DML::%s: vactive_sw_bw_c = %f\n", __func__, p->vactive_sw_bw_c);
@@ -6157,6 +6171,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWOTO
l->zero_array,
l->zero_array,
l->zero_array,
@@ -6193,6 +6208,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
l->zero_array, //PrefetchBandwidthLuma,
l->zero_array, //PrefetchBandwidthChroma,
+ l->zero_array, //PrefetchBWOTO
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6229,6 +6245,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6265,6 +6282,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -6301,6 +6319,7 @@ static void calculate_peak_bandwidth_required(
p->surface_read_bandwidth_c,
p->prefetch_bandwidth_l,
p->prefetch_bandwidth_c,
+ p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw
p->excess_vactive_fill_bw_l,
p->excess_vactive_fill_bw_c,
p->cursor_bw,
@@ -9063,6 +9082,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->ms.Tno_bw_flip[k];
@@ -9207,6 +9227,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -9373,6 +9394,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw;
@@ -11289,6 +11311,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k];
CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k];
+ CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k];
CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k];
CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k];
CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k];
@@ -11431,6 +11454,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
@@ -11563,6 +11587,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c;
calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma;
calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma;
+ calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0];
calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->mp.excess_vactive_fill_bw_l;
calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c;
calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 23c0fca5515f..b7cb017b59ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -484,6 +484,8 @@ struct dml2_core_internal_mode_support {
double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK];
double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES];
double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES];
+ /* oto bw should also be considered when calculating urgent bw, to avoid oto/equ mismatches between ms and mp */
+ double RequiredPrefetchBWOTO[DML2_MAX_PLANES];
double cursor_bw[DML2_MAX_PLANES];
double prefetch_cursor_bw[DML2_MAX_PLANES];
double prefetch_vmrow_bw[DML2_MAX_PLANES];
@@ -1381,6 +1383,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals {
double vm_row_bw;
double flip_and_active_bw;
double flip_and_prefetch_bw;
+ double flip_and_prefetch_bw_oto;
double active_and_excess_bw;
};
@@ -1792,6 +1795,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params {
double *VRatioPrefetchC;
double *RequiredPrefetchPixelDataBWLuma;
double *RequiredPrefetchPixelDataBWChroma;
+ double *RequiredPrefetchBWOTO;
bool *NotEnoughTimeForDynamicMetadata;
double *Tno_bw;
double *Tno_bw_flip;
@@ -2025,6 +2029,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params {
double *surface_read_bandwidth_c;
double *prefetch_bandwidth_l;
double *prefetch_bandwidth_c;
+ double *prefetch_bandwidth_oto;
double *excess_vactive_fill_bw_l;
double *excess_vactive_fill_bw_c;
double *cursor_bw;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index a3324f7b9ba6..15c906c42ec4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -1082,12 +1082,21 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
const struct dml2_fams2_meta *stream_fams2_meta;
unsigned int microschedule_vlines;
unsigned int i;
+ unsigned int mcaches_per_plane;
+ unsigned int total_mcaches_required = 0;
unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };
/* confirm the timing is not a centered timing */
for (i = 0; i < display_config->display_config.num_planes; i++) {
plane_descriptor = &display_config->display_config.plane_descriptors[i];
+ mcaches_per_plane = 0;
+
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane += display_config->stage2.mcache_allocations[i].num_mcaches_plane0 +
+ display_config->stage2.mcache_allocations[i].num_mcaches_plane1 -
+ (display_config->stage2.mcache_allocations[i].last_slice_sharing.plane0_plane1 ? 1 : 0);
+ }
if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
num_planes_per_stream[plane_descriptor->stream_index]++;
@@ -1098,7 +1107,19 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
return false;
}
+
+ /* a phantom requires the same number of mcaches as the main plane */
+ if (plane_descriptor->surface.dcc.enable) {
+ mcaches_per_plane *= 2;
+ }
}
+
+ total_mcaches_required += mcaches_per_plane;
+ }
+
+ if (total_mcaches_required > pmo->soc_bb->num_dcc_mcaches) {
+ /* too many mcaches required */
+ return false;
}
for (i = 0; i < DML2_MAX_PLANES; i++) {
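The mcache accounting added above reduces to a small budget check: sum the DCC mcaches per plane (plane0 + plane1, minus one when the last slice is shared), double the count for planes that need a SubVP phantom copy, and reject the config when the total exceeds the SoC cap. A minimal standalone sketch of the same arithmetic, with struct and field names simplified from the kernel types:

#include <stdbool.h>

struct mcache_alloc {
	unsigned int num_mcaches_plane0;
	unsigned int num_mcaches_plane1;
	bool last_slice_shared_p0_p1;	/* simplified last_slice_sharing.plane0_plane1 */
};

/* Returns false when the planes would need more DCC mcaches than the SoC provides. */
static bool mcache_budget_ok(const struct mcache_alloc *allocs,
			     const bool *dcc_enabled,
			     const bool *needs_phantom,
			     unsigned int num_planes,
			     unsigned int num_dcc_mcaches)
{
	unsigned int total = 0;
	unsigned int i;

	for (i = 0; i < num_planes; i++) {
		unsigned int per_plane;

		if (!dcc_enabled[i])
			continue;

		/* A shared last slice between plane0 and plane1 saves one mcache. */
		per_plane = allocs[i].num_mcaches_plane0 +
			    allocs[i].num_mcaches_plane1 -
			    (allocs[i].last_slice_shared_p0_p1 ? 1 : 0);

		/* The phantom (SubVP) copy needs as many mcaches as the main plane. */
		if (needs_phantom[i])
			per_plane *= 2;

		total += per_plane;
	}

	return total <= num_dcc_mcaches;
}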
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
index a8f58f8448e4..dc2ce5e77f57 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
@@ -831,7 +831,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
bool uclk_pstate_success = false;
bool vmin_success = false;
bool stutter_success = false;
- unsigned int i;
memset(l, 0, sizeof(struct dml2_build_mode_programming_locals));
memset(in_out->programming, 0, sizeof(struct dml2_display_cfg_programming));
@@ -977,13 +976,6 @@ static bool dml2_top_soc15_build_mode_programming(struct dml2_build_mode_program
}
/*
- * Populate mcache programming
- */
- for (i = 0; i < in_out->display_config->num_planes; i++) {
- in_out->programming->plane_programming[i].mcache_allocation = l->base_display_config_with_meta.stage2.mcache_allocations[i];
- }
-
- /*
* Call DPMM to map all requirements to minimum clock state
*/
if (result) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b8a34abaf519..aeb9fae83cac 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -969,7 +969,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
}
}
-static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out)
+static struct scaler_data *get_scaler_data_for_plane(
+ const struct dc_plane_state *in,
+ struct dc_state *context)
{
int i;
struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe;
@@ -990,7 +992,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc
}
ASSERT(i < MAX_PIPES);
- memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out));
+ return &temp_pipe->plane_res.scl_data;
}
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location,
@@ -1053,11 +1055,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
const struct dc_plane_state *in, struct dc_state *context,
const struct soc_bounding_box_st *soc)
{
- struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL);
- if (!scaler_data)
- return;
-
- get_scaler_data_for_plane(in, context, scaler_data);
+ struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context);
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
@@ -1122,8 +1120,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out
out->DynamicMetadataTransmittedBytes[location] = 0;
out->NumberOfCursors[location] = 1;
-
- kfree(scaler_data);
}
static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2,
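The get_scaler_data_for_plane() rework above trades a kzalloc()/memcpy()/kfree() round trip for a borrowed pointer into context->res_ctx.temp_pipe. The implied contract, sketched with simplified types (an inference from the call site, not a documented guarantee): the pointer refers to scratch data owned by the dc_state and is only valid until temp_pipe is rebuilt, so callers must consume it immediately, as populate_dml_plane_cfg_from_plane_state() now does.

struct scaler_data_s { int viewport_w, viewport_h; /* trimmed */ };
struct temp_pipe_s { struct scaler_data_s scl_data; };

/* After the rework: no heap traffic, the caller borrows context-owned storage. */
static struct scaler_data_s *get_scaler_data(struct temp_pipe_s *temp_pipe)
{
	/* temp_pipe is recomputed on every query; the returned pointer is
	 * only valid until the next call, so do not cache it.
	 */
	return &temp_pipe->scl_data;
}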
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 0f944fcfd5a5..785226945699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -159,6 +159,7 @@ struct dml2_clks_table_entry {
unsigned int dtbclk_mhz;
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
+ unsigned int dram_speed_mts; /* which is based on wck_ratio */
};
struct dml2_clks_num_entries {
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 40acebd13e46..abf439e743f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -425,11 +425,6 @@ bool dpp3_get_optimal_number_of_taps(
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
@@ -467,6 +462,12 @@ bool dpp3_get_optimal_number_of_taps(
else
scl_data->taps.h_taps_c = in_taps->h_taps_c;
+ // Avoid null data in the scl data with this early return; proceed with the non-adaptive calculation first
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
/*Ensure we can support the requested number of vtaps*/
min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert);
min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
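The dpp3_get_optimal_number_of_taps() hunk moves the max-downscale rejection below the tap-default assignments so the scl_data taps are always populated before the function can bail, as the relocated comment notes. The shape of the fix, as a sketch:

#include <stdbool.h>

struct taps_s { unsigned int h_taps, v_taps; };

/* Fill defaults first, validate second: even a rejected config leaves
 * well-defined tap values behind instead of stale or null data.
 */
static bool pick_taps(struct taps_s *out, unsigned int ratio_x100,
		      unsigned int src_width, unsigned int max_src_width)
{
	/* Programming-guide default: taps = min(ceil(2 * ratio), 8). */
	unsigned int t = (2 * ratio_x100 + 99) / 100;

	if (t < 1)
		t = 1;
	if (t > 8)
		t = 8;
	out->h_taps = t;
	out->v_taps = t;

	if (max_src_width != 0 && src_width > max_src_width)
		return false;	/* reject, but *out is already initialized */

	return true;
}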
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 1236e0f9a256..712aff7e17f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -120,10 +120,11 @@ void dpp401_set_cursor_attributes(
enum dc_cursor_color_format color_format = cursor_attributes->color_format;
int cur_rom_en = 0;
- // DCN4 should always do Cursor degamma for Cursor Color modes
if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
- cur_rom_en = 1;
+ if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+ cur_rom_en = 1;
+ }
}
REG_UPDATE_3(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 678db949cfe3..759b453385c4 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -323,7 +323,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
break;
case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
case COLOR_SPACE_2020_RGB_FULLRANGE:
- case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_2020_YCBCR_LIMITED:
case COLOR_SPACE_XR_RGB:
case COLOR_SPACE_MSREF_SCRGB:
case COLOR_SPACE_ADOBERGB:
@@ -336,6 +336,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
case COLOR_SPACE_CUSTOMPOINTS:
case COLOR_SPACE_UNKNOWN:
case COLOR_SPACE_YCBCR709_BLACK:
+ default:
/* do nothing */
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 81f4c386c287..94ceccfc0498 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1065,7 +1065,8 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ if (!enable) {
+ /* follow the OEM panel config's requirement */
pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
msleep(pre_T11_delay);
}
@@ -1654,9 +1655,7 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -2104,8 +2103,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 13f9e9b439f6..bbeaefe1ef0d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1112,9 +1112,7 @@ static void dcn10_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
@@ -3217,8 +3215,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
if ((tg != NULL) && tg->funcs) {
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
@@ -3427,52 +3424,6 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
hubbub->funcs->update_dchub(hubbub, dh_data);
}
-static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3572,7 +3523,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
- if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b78096a7690e..1a07973ead4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -952,9 +952,7 @@ enum dc_status dcn20_enable_stream_timing(
params.vertical_total_max = stream->adjust.v_total_max;
params.vertical_total_mid = stream->adjust.v_total_mid;
params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
// DRR should set trigger event to monitor surface update event
if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
@@ -1266,14 +1264,18 @@ static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
+ uint32_t org_ip_request_cntl = 0;
+
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (REG(DC_IP_REQUEST_CNTL)) {
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 1);
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
@@ -1281,8 +1283,10 @@ static void dcn20_power_on_plane_resources(
if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
- REG_SET(DC_IP_REQUEST_CNTL, 0,
- IP_REQUEST_EN, 0);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
DC_LOG_DEBUG(
"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
}
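The same IP_REQUEST_EN discipline appears twice in this series: here in dcn20_power_on_plane_resources() and again in the new dcn401_plane_atomic_power_down() further down. Read the field first, assert it only if it was clear, and restore it only in that case, so a caller that already holds the enable keeps it. A runnable model of the pattern (the static variable stands in for the DC_IP_REQUEST_CNTL register):

#include <stdint.h>
#include <stdio.h>

static uint32_t ip_request_en;	/* models the DC_IP_REQUEST_CNTL field */

static void power_gate_front_end(void)
{
	uint32_t org = ip_request_en;	/* REG_GET(..., IP_REQUEST_EN, &org) */

	if (org == 0)
		ip_request_en = 1;	/* take the enable only if we own it */

	/* ... dpp_pg_control() / hubp_pg_control() would run here ... */

	if (org == 0)
		ip_request_en = 0;	/* restore; leave it set if the caller set it */
}

int main(void)
{
	ip_request_en = 1;		/* caller already holds the enable */
	power_gate_front_end();
	printf("IP_REQUEST_EN after: %u\n", ip_request_en);	/* prints 1 */
	return 0;
}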
@@ -2849,9 +2853,7 @@ void dcn20_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 03ba01f4ace1..38f889826697 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -538,9 +538,7 @@ static void dcn31_reset_back_end_for_pipe(
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b907ad1acedd..922b8d71cf1a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1473,8 +1473,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
num_frames = 2 * (frame_rate % 60);
}
}
- if (tg->funcs->set_drr)
- tg->funcs->set_drr(tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
if (tg->funcs->set_static_screen_control)
tg->funcs->set_static_screen_control(
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 89af3e4afbc2..da8afb08b920 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -830,10 +830,7 @@ enum dc_status dcn401_enable_stream_timing(
}
hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
-
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, &params);
+ set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);
/* Event triggers and num frames initialized for DRR, but can be
* later updated for PSR use. Note DRR trigger events are generated
@@ -975,52 +972,6 @@ void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
-static bool dcn401_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *test_pipe, *split_pipe;
- const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- struct rect r1 = scl_data->recout, r2, r2_half;
- int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
- int cur_layer = pipe_ctx->plane_state->layer_index;
-
- /**
- * Disable the cursor if there's another pipe above this with a
- * plane that contains this pipe's viewport to prevent double cursor
- * and incorrect scaling artifacts.
- */
- for (test_pipe = pipe_ctx->top_pipe; test_pipe;
- test_pipe = test_pipe->top_pipe) {
- // Skip invisible layer and pipe-split plane on same layer
- if (!test_pipe->plane_state ||
- !test_pipe->plane_state->visible ||
- test_pipe->plane_state->layer_index == cur_layer)
- continue;
-
- r2 = test_pipe->plane_res.scl_data.recout;
- r2_r = r2.x + r2.width;
- r2_b = r2.y + r2.height;
-
- /**
- * There is another half plane on same layer because of
- * pipe-split, merge together per same height.
- */
- for (split_pipe = pipe_ctx->top_pipe; split_pipe;
- split_pipe = split_pipe->top_pipe)
- if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
- r2_half = split_pipe->plane_res.scl_data.recout;
- r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
- r2.width = r2.width + r2_half.width;
- r2_r = r2.x + r2.width;
- break;
- }
-
- if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
- }
-
- return false;
-}
-
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
if (cursor_width <= 128) {
@@ -1211,7 +1162,7 @@ void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
pos_cpy.x = (uint32_t)x_pos;
pos_cpy.y = (uint32_t)y_pos;
- if (pos_cpy.enable && dcn401_can_pipe_disable_cursor(pipe_ctx))
+ if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
x_pos = pos_cpy.x - param.recout.x;
@@ -1866,9 +1817,8 @@ void dcn401_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- if (pipe_ctx->stream_res.tg->funcs->set_drr)
- pipe_ctx->stream_res.tg->funcs->set_drr(
- pipe_ctx->stream_res.tg, NULL);
+ set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);
+
/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
* the case where the same symclk is shared across multiple otg
* instances
@@ -2027,9 +1977,9 @@ static void dcn401_program_pipe(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
- if (pipe_ctx->update_flags.raw ||
- (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
- pipe_ctx->stream->update_flags.raw)
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw))
dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);
if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
@@ -2659,3 +2609,37 @@ void dcn401_detect_pipe_changes(struct dc_state *old_state,
new_pipe->update_flags.bits.test_pattern_changed = 1;
}
}
+
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+ hubp->funcs->hubp_reset(hubp);
+ dpp->funcs->dpp_reset(dpp);
+
+ if (org_ip_request_cntl == 0)
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ DC_LOG_DEBUG(
+ "Power gated front end %d\n", hubp->inst);
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 17cea748789e..dbd69d215b8b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -102,4 +102,7 @@ void dcn401_detect_pipe_changes(
struct dc_state *new_state,
struct pipe_ctx *old_pipe,
struct pipe_ctx *new_pipe);
+void dcn401_plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index 44cb376f97c1..a4e3501fadbb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -123,7 +123,7 @@ static const struct hwseq_private_funcs dcn401_private_funcs = {
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
- .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .plane_atomic_power_down = dcn401_plane_atomic_power_down,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a7d66cfd93c9..16ef5250a02e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -46,6 +46,7 @@ struct dce_hwseq;
struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
+struct drr_params;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -521,6 +522,11 @@ void set_p_state_switch_method(
struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+void set_drr_and_clear_adjust_pending(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream,
+ struct drr_params *params);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
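Every call site converted in this diff previously open-coded the `if (tg->funcs->set_drr) tg->funcs->set_drr(tg, &params)` guard. The declaration above only fixes the signature; the body below is an inference from the name and the call sites, not the actual definition (which lives outside this diff): forward to the timing generator when the hook exists, and clear the stream's pending DRR adjustment either way.

/* Sketch with struct layouts trimmed to what the call sites imply. */
struct drr_params_s { int v_total_min, v_total_max; };

struct timing_generator_s;
struct tg_funcs_s {
	void (*set_drr)(struct timing_generator_s *tg, struct drr_params_s *p);
};
struct timing_generator_s { const struct tg_funcs_s *funcs; };

struct dc_stream_state_s { unsigned char adjust_pending; /* assumed flag */ };
struct pipe_ctx_s {
	struct { struct timing_generator_s *tg; } stream_res;
};

static void set_drr_and_clear_adjust_pending_sketch(struct pipe_ctx_s *pipe_ctx,
						    struct dc_stream_state_s *stream,
						    struct drr_params_s *params)
{
	struct timing_generator_s *tg = pipe_ctx->stream_res.tg;

	if (tg && tg->funcs && tg->funcs->set_drr)
		tg->funcs->set_drr(tg, params);	/* params == NULL disables DRR */

	if (stream)
		stream->adjust_pending = 0;	/* assumed: mark the adjustment applied */
}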
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d558efc6e12f..652d52040f4e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -627,7 +627,7 @@ struct dc_state {
*/
struct bw_context bw_ctx;
- struct block_sequence block_sequence[50];
+ struct block_sequence block_sequence[100];
unsigned int block_sequence_steps;
struct dc_dmub_cmd dc_dmub_cmd[10];
unsigned int dmub_cmd_count;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7a1ca1e98059..221645c023b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -221,6 +221,7 @@ enum dentist_divider_range {
CLK_SF(CLK0_CLK_PLL_REQ, FbMult_frac, mask_sh)
#define CLK_REG_LIST_DCN401() \
+ SR(DENTIST_DISPCLK_CNTL), \
CLK_SR_DCN401(CLK0_CLK_PLL_REQ, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK0_DFS_CNTL, CLK01, 0), \
CLK_SR_DCN401(CLK0_CLK1_DFS_CNTL, CLK01, 0), \
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0150f2581ee4..0c5675d1c593 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -119,10 +119,14 @@ static const struct dpp_input_csc_matrix __maybe_unused dpp_input_csc_matrix[] =
{ 0x39a6, 0x2568, 0, 0xe0d6,
0xeedd, 0x2568, 0xf925, 0x9a8,
0, 0x2568, 0x43ee, 0xdbb2 } },
- { COLOR_SPACE_2020_YCBCR,
+ { COLOR_SPACE_2020_YCBCR_FULL,
{ 0x2F30, 0x2000, 0, 0xE869,
0xEDB7, 0x2000, 0xFABC, 0xBC6,
0, 0x2000, 0x3C34, 0xE1E6 } },
+ { COLOR_SPACE_2020_YCBCR_LIMITED,
+ { 0x35B9, 0x2543, 0, 0xE2B2,
+ 0xEB2F, 0x2543, 0xFA01, 0x0B1F,
+ 0, 0x2543, 0x4489, 0xDB42 } },
{ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
{ 0x35E0, 0x255F, 0, 0xE2B3,
0xEB20, 0x255F, 0xF9FD, 0xB1E,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index cd1157d225ab..b32d07ce0f08 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -152,6 +152,8 @@ bool resource_attach_surfaces_to_context(
struct dc_state *context,
const struct resource_pool *pool);
+bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx);
+
#define FREE_PIPE_INDEX_NOT_FOUND -1
/*
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index ec7de9c01fab..e95ec72b4096 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -148,6 +148,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init)
void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
{
struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_stream_state *streams[MAX_PIPES];
struct dc_state *state = link->dc->current_state;
uint8_t count;
int i;
@@ -160,10 +161,18 @@ void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+ /* The subsequent call to dc_commit_updates_for_stream for a full update
+ * will release the current state and swap to a new state. Releasing the
+ * current state results in the stream pointers in the pipe_ctx structs
+ * being zeroed. Hence, cache all streams prior to dc_commit_updates_for_stream.
+ */
+ for (i = 0; i < count; i++)
+ streams[i] = pipes[i]->stream;
+
for (i = 0; i < count; i++) {
- stream_update.stream = pipes[i]->stream;
+ stream_update.stream = streams[i];
dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipes[i]->stream, &stream_update,
+ streams[i], &stream_update,
state);
}
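The caching fix above is the classic iterator-invalidation shape: the first iteration's commit releases the state and zeroes pipes[i]->stream for every later iteration. Snapshotting the pointers first makes the loop independent of the swap. A runnable reduction:

#include <stddef.h>

#define MAX_PIPES 6

struct stream_s { int id; };
struct pipe_s { struct stream_s *stream; };

/* Models dc_commit_updates_for_stream(): the state swap zeroes the old pipes. */
static void commit_update(struct stream_s *s, struct pipe_s *pipes, int count)
{
	(void)s;
	for (int i = 0; i < count; i++)
		pipes[i].stream = NULL;
}

static void dpms_off_all(struct pipe_s *pipes, int count)
{
	struct stream_s *streams[MAX_PIPES];

	/* Snapshot before the first commit invalidates pipes[i].stream. */
	for (int i = 0; i < count; i++)
		streams[i] = pipes[i].stream;

	for (int i = 0; i < count; i++)
		commit_update(streams[i], pipes, count);	/* uses the snapshot */
}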
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 44f33e3bc1c5..64e4ae379e34 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -250,21 +250,21 @@ static uint32_t intersect_frl_link_bw_support(
{
uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
+ /* Skip checking the FRL_MODE bit, as certain PCONs will clear
+ * it despite supporting the link BW indicated in the other bits.
+ */
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
return supported_bw_in_kbps;
}
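With the FRL_MODE gate dropped, intersect_frl_link_bw_support() is a pure highest-bit-wins decode of the HDMI_ENCODED_LINK_BW field. The same decode as a table walk (the bit positions here are illustrative stand-ins, not the real DPCD layout):

#include <stddef.h>
#include <stdint.h>

/* Illustrative bit assignments; the real ones live in the DPCD union. */
#define BW_9G	(1u << 0)
#define BW_18G	(1u << 1)
#define BW_24G	(1u << 2)
#define BW_32G	(1u << 3)
#define BW_40G	(1u << 4)
#define BW_48G	(1u << 5)

static uint32_t frl_bw_kbps(uint8_t bits, uint32_t fallback_kbps)
{
	static const struct { uint8_t bit; uint32_t kbps; } map[] = {
		{ BW_48G, 48000000 }, { BW_40G, 40000000 }, { BW_32G, 32000000 },
		{ BW_24G, 24000000 }, { BW_18G, 18000000 }, { BW_9G,   9000000 },
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (bits & map[i].bit)
			return map[i].kbps;	/* highest advertised rate wins */

	return fallback_kbps;	/* no bits set: keep max_supported_frl_bw_in_kbps */
}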
@@ -945,6 +945,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
* TODO: add MST specific link training routine
*/
decide_mst_link_settings(link, link_setting);
+ } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+ link_setting->lane_count = LANE_COUNT_FOUR;
+ link_setting->link_rate = LINK_RATE_HIGH3;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
/* enable edp link optimization for DSC eDP case */
if (stream->timing.flags.DSC) {
@@ -967,9 +970,6 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
} else {
edp_decide_link_settings(link, link_setting, req_bw);
}
- } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
- link_setting->lane_count = LANE_COUNT_FOUR;
- link_setting->link_rate = LINK_RATE_HIGH3;
} else {
decide_dp_link_settings(link, link_setting, req_bw);
}
@@ -1502,7 +1502,7 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8] = {0};
+ uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
@@ -1552,6 +1552,10 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.alpm.raw =
+ lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 2c73ac87cd66..c27ffec5d84f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -75,7 +75,8 @@ void dp_disable_link_phy(struct dc_link *link,
struct dc *dc = link->ctx->dc;
if (!link->wa_flags.dp_keep_receiver_powered &&
- !link->skip_implict_edp_power_control)
+ !link->skip_implict_edp_power_control &&
+ link->type != dc_connection_none)
dpcd_write_rx_power_ctrl(link, false);
dc->hwss.disable_link_output(link, link_res, signal);
@@ -163,8 +164,9 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
} else {
if (link->fec_state == dc_link_fec_ready) {
fec_config = 0;
- core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
- &fec_config, sizeof(fec_config));
+ if (link->type != dc_connection_none)
+ core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+ &fec_config, sizeof(fec_config));
link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 751c18e592ea..7848ddb94456 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1782,13 +1782,10 @@ bool perform_link_training_with_retries(
is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
(cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low) {
+ if (is_link_bw_low)
DC_LOG_WARNING(
"%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
__func__, link->link_index, req_bw, link_bw);
-
- return false;
- }
}
msleep(delay_between_attempts);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 3bdce32a85e3..ae95ec48e572 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -36,7 +36,8 @@
link->ctx->logger
static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
- const struct dc_link_settings *link_settings)
+ const struct dc_link_settings *link_settings,
+ enum lttpr_mode lttpr_mode)
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
@@ -49,6 +50,8 @@ static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
DP_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
+ if (lttpr_mode != LTTPR_MODE_NON_TRANSPARENT)
+ wait_in_micro_secs = 400;
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
}
@@ -110,7 +113,6 @@ void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
@@ -119,6 +121,7 @@ void decide_8b_10b_training_settings(
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting, lt_settings->lttpr_mode);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
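The 8b/10b change above defers cr_pattern_time until after lttpr_mode is decided, so the CR AUX read interval can depend on it. Condensed into one function (ordering taken from the hunk; the surrounding DPCD-revision gate is elided):

enum lttpr_mode_s {
	LTTPR_MODE_NON_LTTPR_S,
	LTTPR_MODE_TRANSPARENT_S,
	LTTPR_MODE_NON_TRANSPARENT_S,
};

/* Default 100 us; raised to 400 us outside non-transparent LTTPR mode; a
 * non-zero DPCD TRAINING_AUX_RD_INTERVAL (units of 4 ms) overrides both.
 */
static unsigned int cr_aux_rd_interval_us(unsigned int dpcd_interval_units,
					  enum lttpr_mode_s mode)
{
	unsigned int us = 100;

	if (mode != LTTPR_MODE_NON_TRANSPARENT_S)
		us = 400;

	if (dpcd_interval_units)
		us = dpcd_interval_units * 4000;

	return us;
}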
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e0e3bb865359..1e4adbc764ea 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -675,6 +675,18 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ //Clear PSR cfg
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_EN_CFG,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
dc = link->ctx->dc;
dmcu = dc->res_pool->dmcu;
psr = dc->res_pool->psr;
@@ -685,9 +697,6 @@ bool edp_setup_psr(struct dc_link *link,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
psr_configuration.bits.ENABLE = 1;
psr_configuration.bits.CRC_VERIFICATION = 1;
psr_configuration.bits.FRAME_CAPTURE_INDICATION =
@@ -950,6 +959,16 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
if (!link)
return false;
+ //Clear Replay config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_SINK_PR_ENABLE_AND_CONFIGURATION,
+ (uint8_t *)&(replay_config.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ link->replay_settings.config.replay_error_status.raw = 0;
+
dc = link->ctx->dc;
replay = dc->res_pool->replay;
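Both hunks above apply the same ordering change to edp_setup_psr() and edp_setup_replay(): write a zeroed configuration to the sink first, then check support, so a panel left configured by a previous driver instance gets cleared even when the feature is off this time around. Skeleton of the ordering:

#include <stdbool.h>
#include <stdint.h>

static void dpcd_write_cfg(uint8_t cfg) { (void)cfg; }	/* stand-in DPCD write */

static bool setup_sink_feature(bool supported, uint8_t enable_cfg)
{
	/* Always clear the sink-side config first ... */
	dpcd_write_cfg(0);

	/* ... and only then bail if the feature is unsupported. */
	if (!supported)
		return false;

	dpcd_write_cfg(enable_cfg);	/* e.g. ENABLE | CRC_VERIFICATION bits */
	return true;
}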
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 14acef036b5a..6c2bb3f63be1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1698,7 +1698,7 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ if (pixel_rate_crb) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
@@ -1755,28 +1755,26 @@ static int dcn315_populate_dml_pipes_from_context(
continue;
}
- if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
- bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
- || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
-
- if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
- pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
- (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
- if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
- /* Clamp to 2 pipe split max det segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
- pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
- }
- if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
- /* If we are splitting we must have an even number of segments */
- remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
- pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
- }
- /* Convert segments into size for DML use */
- pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
-
- crb_idx++;
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+
+ crb_idx++;
pipe_cnt++;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 38a9a0d68058..047f05ab0181 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -8,7 +8,7 @@
#include "dc_spl_isharp_filters.h"
#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u3d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
static bool spl_is_yuv420(enum spl_pixel_format format)
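The IDENTITY_RATIO fix above widens the conversion from unsigned 2.19 to 3.19 fixed point. With only two integer bits, a ratio of 5.0 wraps modulo 4.0 and lands on exactly 1 << 19, which the macro would misread as a 1:1 scale; three integer bits cover the 6.0x maximum downscale asserted later in this file. A small demonstration using plain integer masking in place of the spl_fixpt helpers (the wrap behavior is the same whether the real conversion truncates or rounds):

#include <stdint.h>
#include <stdio.h>

/* Keep (int_bits integer + 19 fractional) bits, like spl_fixpt_uNd19(). */
static uint32_t to_und19(double ratio, unsigned int int_bits)
{
	uint64_t v = (uint64_t)(ratio * (double)(1 << 19));

	return (uint32_t)(v & ((1ull << (int_bits + 19)) - 1));
}

int main(void)
{
	/* A 5.0x downscale: u2d19 wraps (5.0 mod 4.0 == 1.0) to a false identity. */
	printf("u2d19(5.0) == 1<<19: %d\n", to_und19(5.0, 2) == (1u << 19));	/* 1 */
	printf("u3d19(5.0) == 1<<19: %d\n", to_und19(5.0, 3) == (1u << 19));	/* 0 */
	return 0;
}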
@@ -76,6 +76,21 @@ static struct spl_rect shift_rec(const struct spl_rect *rec_in, int x, int y)
return rec_out;
}
+static void spl_opp_adjust_rect(struct spl_rect *rec, const struct spl_opp_adjust *adjust)
+{
+ if ((rec->x + adjust->x) >= 0)
+ rec->x += adjust->x;
+
+ if ((rec->y + adjust->y) >= 0)
+ rec->y += adjust->y;
+
+ if ((rec->width + adjust->width) >= 1)
+ rec->width += adjust->width;
+
+ if ((rec->height + adjust->height) >= 1)
+ rec->height += adjust->height;
+}
+
static struct spl_rect calculate_plane_rec_in_timing_active(
struct spl_in *spl_in,
const struct spl_rect *rec_in)
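For reference, spl_opp_adjust_rect() above applies each signed delta independently and only when the result stays valid (non-negative origin, dimensions of at least 1), so one out-of-range component doesn't veto the others. A usage sketch with a mirrored copy of the helper:

struct rect_s { int x, y, width, height; };
struct adjust_s { int x, y, width, height; };

/* Mirrors the kernel helper for the example. */
static void opp_adjust_rect(struct rect_s *rec, const struct adjust_s *a)
{
	if (rec->x + a->x >= 0)
		rec->x += a->x;
	if (rec->y + a->y >= 0)
		rec->y += a->y;
	if (rec->width + a->width >= 1)
		rec->width += a->width;
	if (rec->height + a->height >= 1)
		rec->height += a->height;
}

int main(void)
{
	struct rect_s recout = { .x = 0, .y = 10, .width = 1920, .height = 1080 };
	struct adjust_s adj = { .x = -4, .y = -4, .width = 8, .height = 8 };

	opp_adjust_rect(&recout, &adj);
	/* x stays 0 (0 - 4 would be negative); y, width, height all move. */
	return 0;
}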
@@ -723,13 +738,15 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
}
}
-static void spl_clamp_viewport(struct spl_rect *viewport)
+static void spl_clamp_viewport(struct spl_rect *viewport, int min_viewport_size)
{
+ if (min_viewport_size == 0)
+ min_viewport_size = MIN_VIEWPORT_SIZE;
/* Clamp minimum viewport size */
- if (viewport->height < MIN_VIEWPORT_SIZE)
- viewport->height = MIN_VIEWPORT_SIZE;
- if (viewport->width < MIN_VIEWPORT_SIZE)
- viewport->width = MIN_VIEWPORT_SIZE;
+ if (viewport->height < min_viewport_size)
+ viewport->height = min_viewport_size;
+ if (viewport->width < min_viewport_size)
+ viewport->width = min_viewport_size;
}
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
@@ -767,25 +784,13 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
+static void spl_choose_lls_policy(enum spl_pixel_format format,
enum linear_light_scaling *lls_pref)
{
- if (spl_is_video_format(format)) {
+ if (spl_is_subsampled_format(format))
*lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
- (tf_type == SPL_TF_TYPE_BYPASS)) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
+ else /* RGB or YUV444 */
+ *lls_pref = LLS_PREF_YES;
}
/* Enable EASF ?*/
@@ -794,7 +799,6 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
int vratio = 0;
int hratio = 0;
bool skip_easf = false;
- bool lls_enable_easf = true;
if (spl_in->disable_easf)
skip_easf = true;
@@ -810,17 +814,13 @@ static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
skip_easf = true;
/*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format
+ * to determine whether to use LINEAR or NONLINEAR scaling
*/
if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ spl_choose_lls_policy(spl_in->basic_in.format,
&spl_in->lls_pref);
- if (!lls_enable_easf)
- skip_easf = true;
-
/* Check for linear scaling or EASF preferred */
if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
skip_easf = true;
@@ -887,6 +887,8 @@ static bool spl_get_isharp_en(struct spl_in *spl_in,
static void spl_get_taps_non_adaptive_scaler(
struct spl_scratch *spl_scratch, const struct spl_taps *in_taps)
{
+ bool check_max_downscale = false;
+
if (in_taps->h_taps == 0) {
if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
@@ -926,6 +928,23 @@ static void spl_get_taps_non_adaptive_scaler(
else
spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+ /*
+ * Max downscale supported is 6.0x. Add an ASSERT to catch if we go beyond that
+ */
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.horz_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+ check_max_downscale = spl_fixpt_le(spl_scratch->scl_data.ratios.vert_c,
+ spl_fixpt_from_fraction(6, 1));
+ SPL_ASSERT(check_max_downscale);
+
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
spl_scratch->scl_data.taps.h_taps = 1;
if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
@@ -944,8 +963,8 @@ static bool spl_get_optimal_number_of_taps(
bool *enable_isharp)
{
int num_part_y, num_part_c;
- int max_taps_y, max_taps_c;
- int min_taps_y, min_taps_c;
+ unsigned int max_taps_y, max_taps_c;
+ unsigned int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
bool skip_easf = false;
bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format);
@@ -1781,6 +1800,8 @@ static bool spl_calculate_number_of_taps(struct spl_in *spl_in, struct spl_scrat
spl_calculate_recout(spl_in, spl_scratch, spl_out);
/* depends on pixel format */
spl_calculate_scaling_ratios(spl_in, spl_scratch, spl_out);
+ /* Adjust recout for opp if needed */
+ spl_opp_adjust_rect(&spl_scratch->scl_data.recout, &spl_in->basic_in.opp_recout_adjust);
/* depends on scaling ratios and recout, does not calculate offset yet */
spl_calculate_viewport_size(spl_in, spl_scratch);
@@ -1817,7 +1838,7 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
// Handle 3d recout
spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_scratch.scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport, spl_in->min_viewport_size);
// Save all calculated parameters in dscl_prog_data structure to program hw registers
spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 467af9dd90de..1c3949b24611 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -427,6 +427,14 @@ struct spl_out {
// SPL inputs
+// opp extra adjustment for rect
+struct spl_opp_adjust {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
// Basic input information
struct basic_in {
enum spl_pixel_format format; // Pixel Format
@@ -444,6 +452,7 @@ struct basic_in {
} num_slices_recout_width;
} num_h_slices_recout_width_align;
int mpc_h_slice_index; // previous mpc_combine_v - split_idx
+ struct spl_opp_adjust opp_recout_adjust;
// Inputs for adaptive scaler - TODO
enum spl_transfer_func_type tf_type; /* Transfer function type */
enum spl_transfer_func_predefined tf_predefined_type; /* Transfer function predefined type */
@@ -484,7 +493,7 @@ struct spl_sharpness_range {
};
struct adaptive_sharpness {
bool enable;
- int sharpness_level;
+ unsigned int sharpness_level;
struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
@@ -535,6 +544,7 @@ struct spl_in {
bool is_hdr_on;
int h_active;
int v_active;
+ int min_viewport_size;
int sdr_white_level_nits;
enum sharpen_policy sharpen_policy;
};
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
index 131f1e3949d3..52d97918a3bd 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -346,7 +346,7 @@ struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
if (m > 0)
return spl_fixpt_shl(
spl_fixed31_32_exp_from_taylor_series(r),
- (unsigned char)m);
+ (unsigned int)m);
else
return spl_fixpt_div_int(
spl_fixed31_32_exp_from_taylor_series(r),
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
index ed2647f9a099..9f349ffe9148 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -189,7 +189,7 @@ static inline struct spl_fixed31_32 spl_fixpt_clamp(
* @brief
* result = arg << shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned int shift)
{
SPL_ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
@@ -203,7 +203,7 @@ static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, uns
* @brief
* result = arg >> shift
*/
-static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned int shift)
{
bool negative = arg.value < 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index d0fe324cb537..8cf89aed024b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -3118,6 +3118,12 @@ struct dmub_cmd_psr_copy_settings_data {
* Some panels request main link off before xth vertical line
*/
uint16_t poweroff_before_vertical_line;
+ /**
+ * Some panels cannot handle the idle pattern during PSR entry,
+ * so power down the PHY before disabling the stream to avoid
+ * sending it.
+ */
+ uint8_t power_down_phy_before_disable_stream;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index d9f31b191c69..1a68b5782cac 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -83,8 +83,8 @@ static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
- uint32_t in_reset, scratch, i, pwait_mode;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -108,7 +108,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -125,9 +125,14 @@ void dmub_dcn31_reset(struct dmub_srv *dmub)
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
+
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index e5e77bd3c31e..01d013a12b94 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -88,7 +88,7 @@ static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn35_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 100;
+ const uint32_t timeout = 100000;
uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -113,7 +113,7 @@ void dmub_dcn35_reset(struct dmub_srv *dmub)
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index 39a8cb6d7523..e1c4fe1c6e3e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -63,8 +63,10 @@ static inline void dmub_dcn401_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn401_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t in_reset, scratch, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
@@ -75,32 +77,35 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
+ for (i = 0; i < timeout_us; i++) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
+ for (; i < timeout_us; i++) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(poll_delay_us);
+ }
+ }
+
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
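The dcn401 reset rework above converts three independent 30-iteration register loops into one shared microsecond budget: `i` carries over between the ACK wait, the STOP_FW response wait, and the new PWAIT-mode wait, so the total stall is bounded by timeout_us regardless of how the time splits across phases, and the final `i >= timeout_us` check flags a hang. The pattern in isolation (the predicate stand-ins model the register reads):

#include <stdbool.h>
#include <stdint.h>

static bool acked(void)      { return true; }	/* stand-in: GPINT command acked */
static bool fw_stopped(void) { return true; }	/* stand-in: SCRATCH == STOP_FW_RESPONSE */
static bool in_pwait(void)   { return true; }	/* stand-in: DMCUB_PWAIT_MODE_STATUS bit 0 */
static void udelay_us(uint32_t us) { (void)us; }

static bool wait_for_fw_stop(void)
{
	const uint32_t timeout_us = 1 * 1000 * 1000;	/* 1 s shared budget */
	uint32_t i;

	for (i = 0; i < timeout_us; i++) {	/* phase 1 */
		if (acked())
			break;
		udelay_us(1);
	}
	for (; i < timeout_us; i++) {		/* phase 2, resumes at i */
		if (fw_stopped())
			break;
		udelay_us(1);
	}
	for (; i < timeout_us; i++) {		/* phase 3, resumes at i */
		if (in_pwait())
			break;
		udelay_us(1);
	}

	return i < timeout_us;	/* false: budget exhausted, treat as hung */
}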
@@ -131,7 +136,10 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
dmub_dcn401_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -151,6 +159,7 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+	/* release only the DMCUB reset, to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
@@ -161,7 +170,10 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -181,6 +193,7 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
+	/* release only the DMCUB reset, to prevent premature execution */
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
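The three polling loops in the dmub_dcn401_reset() hunk above deliberately share one index variable: the GPINT ACK wait, the STOP_FW response wait, and the PWAIT status check all draw from a single one-second budget, so the worst-case reset latency stays bounded no matter how the time splits between stages. A minimal user-space sketch of that pattern, with hypothetical stage1_done()/stage2_done() predicates standing in for the register polls (two stages shown):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for the hardware polls (GPINT ack, SCRATCH read, ...). */
static bool stage1_done(unsigned int t) { return t >= 3; }
static bool stage2_done(unsigned int t) { return t >= 10; }

int main(void)
{
	const unsigned int timeout_us = 1 * 1000 * 1000; /* shared 1s budget */
	const unsigned int poll_delay_us = 1;
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (stage1_done(i))
			break;
		usleep(poll_delay_us);
	}

	/*
	 * The second loop continues from the same i, so time spent in the
	 * first stage counts against the total budget.
	 */
	for (; i < timeout_us; i++) {
		if (stage2_done(i))
			break;
		usleep(poll_delay_us);
	}

	if (i >= timeout_us)
		fprintf(stderr, "timed out\n");
	else
		printf("all stages done after %u polls\n", i);
	return 0;
}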
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
index 4c8843b79695..31f95b27e227 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h
@@ -169,7 +169,8 @@ struct dmub_srv;
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_EN) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_ACK) \
DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_STAT) \
- DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN)
+ DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_OUTBOX0_RDY_INT_EN) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn401_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index a344e2e49b0e..b3d55cac3569 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -383,10 +383,10 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
colorimetryFormat = ColorimetryYCC_DP_ITU709;
else if (cs == COLOR_SPACE_ADOBERGB)
colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
- else if (cs == COLOR_SPACE_2020_YCBCR)
+ else if (cs == COLOR_SPACE_2020_YCBCR_LIMITED)
colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
- if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
+ if (cs == COLOR_SPACE_2020_YCBCR_LIMITED && tf == TRANSFER_FUNC_GAMMA_22)
colorimetryFormat = ColorimetryYCC_DP_ITU709;
break;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index c488d4a50cf4..b2252deabc17 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -203,6 +203,10 @@
#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB0_WR_MISC_CREDIT 0x0058
#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE 0x005b
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x005c
+#define mmDAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB0_WRCLI_ASK_PENDING 0x005d
#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB0_WRCLI_GO_PENDING 0x005e
@@ -455,6 +459,10 @@
#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB1_WR_MISC_CREDIT 0x00d8
#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE 0x00db
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x00dc
+#define mmDAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB1_WRCLI_ASK_PENDING 0x00dd
#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB1_WRCLI_GO_PENDING 0x00de
@@ -707,6 +715,10 @@
#define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB2_WR_MISC_CREDIT 0x0158
#define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE 0x015b
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x015c
+#define mmDAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB2_WRCLI_ASK_PENDING 0x015d
#define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB2_WRCLI_GO_PENDING 0x015e
@@ -959,6 +971,10 @@
#define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB3_WR_MISC_CREDIT 0x01d8
#define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE 0x01db
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x01dc
+#define mmDAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB3_WRCLI_ASK_PENDING 0x01dd
#define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB3_WRCLI_GO_PENDING 0x01de
@@ -1211,6 +1227,10 @@
#define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB4_WR_MISC_CREDIT 0x0258
#define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE 0x025b
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x025c
+#define mmDAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB4_WRCLI_ASK_PENDING 0x025d
#define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB4_WRCLI_GO_PENDING 0x025e
@@ -4793,6 +4813,10 @@
#define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB5_WR_MISC_CREDIT 0x3058
#define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE 0x305b
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x305c
+#define mmDAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB5_WRCLI_ASK_PENDING 0x305d
#define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB5_WRCLI_GO_PENDING 0x305e
@@ -5045,6 +5069,10 @@
#define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB6_WR_MISC_CREDIT 0x30d8
#define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE 0x30db
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x30dc
+#define mmDAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB6_WRCLI_ASK_PENDING 0x30dd
#define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB6_WRCLI_GO_PENDING 0x30de
@@ -5297,6 +5325,10 @@
#define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1
#define mmDAGB7_WR_MISC_CREDIT 0x3158
#define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE 0x315b
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_BASE_IDX 1
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE 0x315c
+#define mmDAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE_BASE_IDX 1
#define mmDAGB7_WRCLI_ASK_PENDING 0x315d
#define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1
#define mmDAGB7_WRCLI_GO_PENDING 0x315e
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 2969fbf282b7..5069d2fd467f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -1532,6 +1532,12 @@
//DAGB0_WRCLI_DBUS_GO_PENDING
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB0_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB0_DAGB_DLY
#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8
@@ -3207,6 +3213,12 @@
//DAGB1_WRCLI_DBUS_GO_PENDING
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB1_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB1_DAGB_DLY
#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8
@@ -4882,6 +4894,12 @@
//DAGB2_WRCLI_DBUS_GO_PENDING
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB2_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB2_DAGB_DLY
#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8
@@ -6557,6 +6575,12 @@
//DAGB3_WRCLI_DBUS_GO_PENDING
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB3_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB3_DAGB_DLY
#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8
@@ -8232,6 +8256,12 @@
//DAGB4_WRCLI_DBUS_GO_PENDING
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB4_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB4_DAGB_DLY
#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8
@@ -28737,6 +28767,12 @@
//DAGB5_WRCLI_DBUS_GO_PENDING
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB5_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB5_DAGB_DLY
#define DAGB5_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB5_DAGB_DLY__CLI__SHIFT 0x8
@@ -30412,6 +30448,12 @@
//DAGB6_WRCLI_DBUS_GO_PENDING
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB6_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB6_DAGB_DLY
#define DAGB6_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB6_DAGB_DLY__CLI__SHIFT 0x8
@@ -32087,6 +32129,12 @@
//DAGB7_WRCLI_DBUS_GO_PENDING
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0
#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE__ENABLE_MASK 0xFFFFFFFFL
+//DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE__SHIFT 0x0
+#define DAGB7_WRCLI_GPU_SNOOP_OVERRIDE_VALUE__ENABLE_MASK 0xFFFFFFFFL
//DAGB7_DAGB_DLY
#define DAGB7_DAGB_DLY__DLY__SHIFT 0x0
#define DAGB7_DAGB_DLY__CLI__SHIFT 0x8
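These mmhub headers follow the usual AMD register convention: every field gets a __SHIFT and a _MASK constant, and field access is (reg & MASK) >> SHIFT for reads and (reg & ~MASK) | ((val << SHIFT) & MASK) for writes. A standalone sketch of that access pattern using an illustrative narrower field (the new SNOOP_OVERRIDE masks above happen to cover the full 32-bit word):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Field layout in the style of the *_sh_mask.h headers (values are invented). */
#define EXAMPLE_REG__ENABLE__SHIFT 0x4
#define EXAMPLE_REG__ENABLE_MASK   0x000000F0L

static uint32_t reg_get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static uint32_t reg_set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			      uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0xDEADBE0F;

	reg = reg_set_field(reg, EXAMPLE_REG__ENABLE_MASK,
			    EXAMPLE_REG__ENABLE__SHIFT, 0xA);
	printf("reg = 0x%08" PRIX32 ", ENABLE = 0x%" PRIX32 "\n", reg,
	       reg_get_field(reg, EXAMPLE_REG__ENABLE_MASK,
			     EXAMPLE_REG__ENABLE__SHIFT));
	return 0;
}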
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ed9dac00ebfb..f3f5b7dd15cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2802,6 +2802,7 @@ int smu_get_power_limit(void *handle,
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
case IP_VERSION(13, 0, 14):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index da7bd9227afe..5f2a824918e3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -450,8 +450,9 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
int var = (adev->pdev->device & 0xF);
char ucode_prefix[15];
- /* No need to load P2S tables in IOV mode */
- if (amdgpu_sriov_vf(adev))
+ /* No need to load P2S tables in IOV mode or for smu v13.0.12 */
+ if (amdgpu_sriov_vf(adev) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)))
return 0;
if (!(adev->flags & AMD_IS_APU)) {
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index bc37c65305d4..96470fc8e6e5 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -96,21 +96,21 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
/* Check 3rd Tx option (digital output afaik) */
ast->tx_chip = AST_TX_NONE;
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
-	 * Don't make that assumption if the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (jreg & 0x80)
- ast->tx_chip = AST_TX_SIL164;
- }
-
- if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
+ if (AST_GEN(ast) <= 3) {
+ /*
+ * VGACRA3 Enhanced Color Mode Register, check if DVO is already
+ * enabled, in that case, assume we have a SIL164 TMDS transmitter
+ *
+	 * Don't make that assumption if the chip wasn't enabled and
+ * is at power-on reset, otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (jreg & 0x80)
+ ast->tx_chip = AST_TX_SIL164;
+ }
+ } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) {
/*
* On AST GEN4+, look the configuration set by the SoC in
* the SOC scratch register #1 bits 11:8 (interestingly marked
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9d5321c81e68..a29fe1ae803f 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -131,7 +131,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return false;
}
- switch (mode->crtc_hdisplay) {
+ switch (mode->hdisplay) {
case 640:
vbios_mode->enh_table = &res_640x480[refresh_rate_index];
break;
@@ -145,7 +145,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
break;
case 1280:
- if (mode->crtc_vdisplay == 800)
+ if (mode->vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
else
vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
@@ -157,7 +157,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
break;
case 1600:
- if (mode->crtc_vdisplay == 900)
+ if (mode->vdisplay == 900)
vbios_mode->enh_table = &res_1600x900[refresh_rate_index];
else
vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
@@ -166,7 +166,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
break;
case 1920:
- if (mode->crtc_vdisplay == 1080)
+ if (mode->vdisplay == 1080)
vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
else
vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
@@ -210,6 +210,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+ adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde;
adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
@@ -219,6 +220,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
vbios_mode->enh_table->hfp +
vbios_mode->enh_table->hsync);
+ adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde;
adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 657bc3dd18df..98030500a978 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -245,7 +245,9 @@ static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .no_i2s_capture = 1,
.spdif = 1,
+ .no_spdif_capture = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 32902f77f00d..40e4e1b6c911 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -574,6 +574,30 @@ mode_valid(struct drm_atomic_state *state)
return 0;
}
+static int drm_atomic_check_valid_clones(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
+ if (!drm_enc->possible_clones) {
+ DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id);
+ continue;
+ }
+
+ if ((crtc_state->encoder_mask & drm_enc->possible_clones) !=
+ crtc_state->encoder_mask) {
+ DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n",
+ crtc->base.id, crtc_state->encoder_mask);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@@ -745,6 +769,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret != 0)
return ret;
+
+ ret = drm_atomic_check_valid_clones(state, crtc);
+ if (ret != 0)
+ return ret;
}
/*
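The new drm_atomic_check_valid_clones() enforces that the CRTC's encoder_mask is a subset of every active encoder's possible_clones, which is exactly the invariant cloned outputs need. The subset test reduces to one AND-and-compare, sketched here with plain bitmasks and hypothetical encoder assignments:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if every encoder in active_mask is allowed by this encoder's clone mask. */
static bool clones_valid(uint32_t active_mask, uint32_t possible_clones)
{
	return (active_mask & possible_clones) == active_mask;
}

int main(void)
{
	uint32_t dsi = 1u << 0, wb = 1u << 1, dp = 1u << 2;

	/* DSI and writeback declare each other as possible clones. */
	uint32_t dsi_clones = dsi | wb;
	uint32_t wb_clones  = wb | dsi;

	uint32_t active = dsi | wb;
	printf("DSI+WB: %s\n",
	       clones_valid(active, dsi_clones) &&
	       clones_valid(active, wb_clones) ? "ok" : "reject");

	active = dsi | dp; /* DP is not in DSI's clone mask */
	printf("DSI+DP: %s\n",
	       clones_valid(active, dsi_clones) ? "ok" : "reject");
	return 0;
}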
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 103c185bb1c8..ca42e6081d27 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
- u64 root_size, size;
+ u64 root_size, size, start;
unsigned int order;
int i;
@@ -332,7 +332,8 @@ void drm_buddy_fini(struct drm_buddy *mm)
for (i = 0; i < mm->n_roots; ++i) {
order = ilog2(size) - ilog2(mm->chunk_size);
- __force_merge(mm, 0, size, order);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
drm_block_free(mm, mm->roots[i]);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 13bc4c290b17..9edb3247c767 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -6596,6 +6596,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
+ memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata));
info->edid_hdmi_rgb444_dc_modes = 0;
info->edid_hdmi_ycbcr444_dc_modes = 0;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index c299cd94d3f7..cf2463090d3a 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -983,6 +987,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);
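The drm_dev_enter()/drm_dev_exit() pair added around the fdinfo printing makes it bail out cleanly once the device has been unplugged, rather than touching state that is being torn down. A loose user-space analog of those semantics, assuming a reader/writer lock plus a liveness flag (the kernel primitive is SRCU-based, so this sketches the behavior, not the mechanism):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	pthread_rwlock_t lock;
	bool unplugged;
};

/* Analog of drm_dev_enter(): succeed only while the device is live. */
static bool dev_enter(struct fake_dev *dev)
{
	pthread_rwlock_rdlock(&dev->lock);
	if (dev->unplugged) {
		pthread_rwlock_unlock(&dev->lock);
		return false;
	}
	return true; /* caller must dev_exit() */
}

static void dev_exit(struct fake_dev *dev)
{
	pthread_rwlock_unlock(&dev->lock);
}

static void show_fdinfo(struct fake_dev *dev)
{
	if (!dev_enter(dev))
		return; /* device gone: print nothing, touch nothing */
	printf("drm-driver:\texample\n");
	dev_exit(dev);
}

int main(void)
{
	struct fake_dev dev = { PTHREAD_RWLOCK_INITIALIZER, false };

	show_fdinfo(&dev); /* prints */

	pthread_rwlock_wrlock(&dev.lock); /* analog of drm_dev_unplug() */
	dev.unplugged = true;
	pthread_rwlock_unlock(&dev.lock);

	show_fdinfo(&dev); /* silently skipped */
	return 0;
}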
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ee811764c3df..c6240bab3fa5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -348,7 +348,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return -ENOENT;
/* Don't allow imported objects to be mapped */
- if (obj->import_attach) {
+ if (drm_gem_is_imported(obj)) {
ret = -EINVAL;
goto out;
}
@@ -1178,7 +1178,7 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_vma_node_start(&obj->vma_node));
drm_printf_indent(p, indent, "size=%zu\n", obj->size);
drm_printf_indent(p, indent, "imported=%s\n",
- str_yes_no(obj->import_attach));
+ str_yes_no(drm_gem_is_imported(obj)));
if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 34bca7567576..3ea9f23b4f67 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
struct mipi_dbi *dbi = &dbidev->dbi;
- size_t len = width * height * 2;
+ const struct drm_format_info *dst_format;
+ size_t len;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
+ dst_format = drm_format_info(dbidev->pixel_format);
+ len = drm_format_info_min_pitch(dst_format, 0, width) * height;
+
memset(dbidev->tx_buf, 0, len);
mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
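Hardcoding width * height * 2 sized the blank buffer correctly only for 16-bit formats; deriving the length from the destination format's minimum pitch keeps it right for whatever pixel format the panel uses. A simplified sketch of the computation, using a toy min_pitch() based on bytes-per-pixel instead of the full drm_format_info machinery:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for drm_format_info_min_pitch(): cpp = bytes per pixel. */
static size_t min_pitch(size_t cpp, size_t width)
{
	return cpp * width;
}

int main(void)
{
	size_t width = 320, height = 240;

	size_t len_rgb565 = min_pitch(2, width) * height; /* matches width*height*2 */
	size_t len_rgb888 = min_pitch(3, width) * height; /* the hardcoded formula undersizes this */

	printf("RGB565: %zu bytes, RGB888: %zu bytes (hardcoded *2 gives %zu)\n",
	       len_rgb565, len_rgb888, width * height * 2);
	return 0;
}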
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 86d6185fda50..8a6135b179d3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -221,6 +221,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
fixed20_12 pbn_div;
int bpp, slots = -EINVAL;
int dsc_slice_count = 0;
@@ -271,7 +272,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
link_bpp_x16,
&crtc_state->dp_m_n);
- if (intel_dp->is_mst) {
+ if (is_mst) {
int remote_bw_overhead;
int remote_tu;
fixed20_12 pbn;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
index 9aae779c4da3..4969d3de2bac 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
@@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#else
static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
@@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
return 0;
}
-#endif
+static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+ return false;
+}
-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+#endif
#endif /*__INTEL_PXP_GSCCS_H__ */
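The header fix moves the intel_pxp_gsccs_is_ready_for_sessions() declaration inside the #ifdef and pairs it with an inline stub in the #else branch, the standard pattern for Kconfig-optional code: call sites compile unchanged whether the feature is built or not. A condensed model of the pattern; the feature macro and function name here are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Toggle to emulate the Kconfig option. */
#define CONFIG_EXAMPLE_FEATURE 1

#if CONFIG_EXAMPLE_FEATURE
/* Real implementation, normally in a .c file compiled only when enabled. */
static bool feature_is_ready(void)
{
	return true;
}
#else
/* Inline stub: keeps every caller compiling and linking when disabled. */
static inline bool feature_is_ready(void)
{
	return false;
}
#endif

int main(void)
{
	/* Call sites never need their own #ifdefs. */
	printf("feature ready: %s\n", feature_is_ready() ? "yes" : "no");
	return 0;
}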
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index a12ef24c7742..b7d90574df9a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -410,12 +410,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
{
- mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
+ if (dpi->conf->reg_h_fre_con)
+ mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N);
}
static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi)
{
- if (dpi->conf->edge_sel_en)
+ if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con)
mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN);
}
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 81d2ee37e773..49ff9f1f16d3 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -169,7 +169,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
{
.limits = {
- .max_hdmi_phy_freq = 1650000,
+ .max_hdmi_phy_freq = 1650000000,
},
.attrs = (const struct soc_device_attribute []) {
{ .soc_id = "GXL (S805*)", },
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 3f9345c14f31..be4b0e4df6e1 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -37,7 +37,7 @@ struct meson_drm_match_data {
};
struct meson_drm_soc_limits {
- unsigned int max_hdmi_phy_freq;
+ unsigned long long max_hdmi_phy_freq;
};
struct meson_drm {
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 0593a1cde906..c61e6026adfa 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -70,12 +70,12 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
{
struct meson_drm *priv = encoder_hdmi->priv;
int vic = drm_match_cea_mode(mode);
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
- vclk_freq = mode->clock;
+ vclk_freq = mode->clock * 1000ULL;
/* For 420, pixel clock is half unlike venc clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
@@ -107,7 +107,8 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ dev_dbg(priv->dev,
+ "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -122,10 +123,11 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int hdmi_freq;
+ unsigned long long clock = mode->clock * 1000ULL;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long hdmi_freq;
int vic = drm_match_cea_mode(mode);
enum drm_mode_status status;
@@ -144,12 +146,12 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (status != MODE_OK)
return status;
- return meson_vclk_dmt_supported_freq(priv, mode->clock);
+ return meson_vclk_dmt_supported_freq(priv, clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
- vclk_freq = mode->clock;
+ vclk_freq = clock;
/* For 420, pixel clock is half unlike venc clock */
if (drm_mode_is_420_only(display_info, mode) ||
@@ -179,7 +181,8 @@ static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bri
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ dev_dbg(priv->dev,
+ "%s: vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a942dc6a6dc..3325580d885d 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,7 +110,10 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+#define PIXEL_FREQ_1000_1001(_freq) \
+ DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
+#define PHY_FREQ_1000_1001(_freq) \
+ (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
/* VID PLL Dividers */
enum {
@@ -360,11 +363,11 @@ enum {
};
struct meson_vclk_params {
- unsigned int pll_freq;
- unsigned int phy_freq;
- unsigned int vclk_freq;
- unsigned int venc_freq;
- unsigned int pixel_freq;
+ unsigned long long pll_freq;
+ unsigned long long phy_freq;
+ unsigned long long vclk_freq;
+ unsigned long long venc_freq;
+ unsigned long long pixel_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -372,11 +375,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 54000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 54000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -384,11 +387,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pll_freq = 4320000,
- .phy_freq = 270000,
- .vclk_freq = 54000,
- .venc_freq = 54000,
- .pixel_freq = 27000,
+ .pll_freq = 4320000000,
+ .phy_freq = 270000000,
+ .vclk_freq = 54000000,
+ .venc_freq = 54000000,
+ .pixel_freq = 27000000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -396,11 +399,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 74250000,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -408,11 +411,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
- .pll_freq = 2970000,
- .phy_freq = 742500,
- .vclk_freq = 74250,
- .venc_freq = 74250,
- .pixel_freq = 74250,
+ .pll_freq = 2970000000,
+ .phy_freq = 742500000,
+ .vclk_freq = 74250000,
+ .venc_freq = 74250000,
+ .pixel_freq = 74250000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -420,11 +423,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
- .pll_freq = 2970000,
- .phy_freq = 1485000,
- .vclk_freq = 148500,
- .venc_freq = 148500,
- .pixel_freq = 148500,
+ .pll_freq = 2970000000,
+ .phy_freq = 1485000000,
+ .vclk_freq = 148500000,
+ .venc_freq = 148500000,
+ .pixel_freq = 148500000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -432,11 +435,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 297000,
- .vclk_freq = 297000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 297000000,
+ .vclk_freq = 297000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -444,11 +447,11 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
- .pll_freq = 5940000,
- .phy_freq = 5940000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 594000,
+ .pll_freq = 5940000000,
+ .phy_freq = 5940000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 594000000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
@@ -456,11 +459,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_594000_YUV420] = {
- .pll_freq = 5940000,
- .phy_freq = 2970000,
- .venc_freq = 594000,
- .vclk_freq = 594000,
- .pixel_freq = 297000,
+ .pll_freq = 5940000000,
+ .phy_freq = 2970000000,
+ .venc_freq = 594000000,
+ .vclk_freq = 594000000,
+ .pixel_freq = 297000000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -617,16 +620,16 @@ static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
3 << 20, pll_od_to_reg(od3) << 20);
}
-#define XTAL_FREQ 24000
+#define XTAL_FREQ (24 * 1000 * 1000)
static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
- pll_freq /= 2;
+ pll_freq = DIV_ROUND_DOWN_ULL(pll_freq, 2);
- return pll_freq / XTAL_FREQ;
+ return DIV_ROUND_DOWN_ULL(pll_freq, XTAL_FREQ);
}
#define HDMI_FRAC_MAX_GXBB 4096
@@ -635,12 +638,13 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
- unsigned int parent_freq = XTAL_FREQ;
+ unsigned long long parent_freq = XTAL_FREQ;
unsigned int frac_max = HDMI_FRAC_MAX_GXL;
unsigned int frac_m;
unsigned int frac;
+ u32 remainder;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -652,11 +656,11 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
frac_max = HDMI_FRAC_MAX_G12A;
/* We can have a perfect match !*/
- if (pll_freq / m == parent_freq &&
- pll_freq % m == 0)
+ if (div_u64_rem(pll_freq, m, &remainder) == parent_freq &&
+ remainder == 0)
return 0;
- frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+ frac = mul_u64_u64_div_u64(pll_freq, frac_max, parent_freq);
frac_m = m * frac_max;
if (frac_m > frac)
return frac_max;
@@ -666,7 +670,7 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
}
static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
- unsigned int m,
+ unsigned long long m,
unsigned int frac)
{
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
@@ -694,7 +698,7 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
}
static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
- unsigned int freq,
+ unsigned long long freq,
unsigned int *m,
unsigned int *frac,
unsigned int *od)
@@ -706,7 +710,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
continue;
*frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d\n",
freq, *m, *frac, *od);
if (meson_hdmi_pll_validate_params(priv, *m, *frac))
@@ -718,7 +722,7 @@ static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
/* pll_freq is the frequency after the OD dividers */
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq)
{
unsigned int od, m, frac;
@@ -741,7 +745,7 @@ EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
/* pll_freq is the frequency after the OD dividers */
static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
- unsigned int pll_freq)
+ unsigned long long pll_freq)
{
unsigned int od, m, frac, od1, od2, od3;
@@ -756,7 +760,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
od1 = od / od2;
}
- DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+ DRM_DEBUG_DRIVER("PLL params for %lluHz: m=%x frac=%x od=%d/%d/%d\n",
pll_freq, m, frac, od1, od2, od3);
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
@@ -764,17 +768,18 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
return;
}
- DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+ DRM_ERROR("Fatal, unable to find parameters for PLL freq %lluHz\n",
pll_freq);
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq)
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ DRM_DEBUG_DRIVER("phy_freq = %lluHz vclk_freq = %lluHz\n",
phy_freq, vclk_freq);
/* Check against soc revision/package limits */
@@ -785,19 +790,19 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
i, params[i].pixel_freq,
- FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ PIXEL_FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+ PHY_FREQ_1000_1001(params[i].phy_freq));
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
- vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
+ if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
+ vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -805,8 +810,9 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
}
EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
-static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
- unsigned int od1, unsigned int od2, unsigned int od3,
+static void meson_vclk_set(struct meson_drm *priv,
+ unsigned long long pll_base_freq, unsigned int od1,
+ unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
bool hdmi_use_enci, bool vic_alternate_clock)
@@ -826,15 +832,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_generic_set(priv, pll_base_freq);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x3d;
frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0x59 : 0x5a;
frac = vic_alternate_clock ? 0xe8f : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0x7b;
frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
@@ -844,15 +850,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x281 : 0x300;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x347 : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x102 : 0x200;
break;
@@ -861,15 +867,15 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
- case 2970000:
+ case 2970000000:
m = 0x7b;
frac = vic_alternate_clock ? 0x140b4 : 0x18000;
break;
- case 4320000:
+ case 4320000000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x1a3ee : 0;
break;
- case 5940000:
+ case 5940000000:
m = 0xf7;
frac = vic_alternate_clock ? 0x8148 : 0x10000;
break;
@@ -1025,14 +1031,14 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
- unsigned int freq;
- unsigned int hdmi_tx_div;
- unsigned int venc_div;
+ unsigned long long freq;
+ unsigned long long hdmi_tx_div;
+ unsigned long long venc_div;
if (target == MESON_VCLK_TARGET_CVBS) {
meson_venci_cvbs_clock_config(priv);
@@ -1052,27 +1058,27 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- hdmi_tx_div = vclk_freq / dac_freq;
+ hdmi_tx_div = DIV_ROUND_DOWN_ULL(vclk_freq, dac_freq);
if (hdmi_tx_div == 0) {
- pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+ pr_err("Fatal Error, invalid HDMI-TX freq %lluHz\n",
dac_freq);
return;
}
- venc_div = vclk_freq / venc_freq;
+ venc_div = DIV_ROUND_DOWN_ULL(vclk_freq, venc_freq);
if (venc_div == 0) {
- pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+ pr_err("Fatal Error, invalid HDMI venc freq %lluHz\n",
venc_freq);
return;
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+ phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
(vclk_freq == params[freq].vclk_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
@@ -1098,7 +1104,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
if (!params[freq].pixel_freq) {
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
+ pr_err("Fatal Error, invalid HDMI vclk freq %lluHz\n",
+ vclk_freq);
return;
}
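The PIXEL_FREQ_1000_1001()/PHY_FREQ_1000_1001() helpers do the NTSC-style 1000/1001 rate adjustment in 64-bit Hz, and the PHY variant rounds through a divide-by-10 first so the adjusted rate stays a multiple of 10. A user-space check of the arithmetic, with DIV_ROUND_CLOSEST open-coded:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest_u64(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

static uint64_t pixel_freq_1000_1001(uint64_t freq)
{
	return div_round_closest_u64(freq * 1000, 1001);
}

/* Round on a 10Hz grid so the adjusted PHY rate stays a multiple of 10. */
static uint64_t phy_freq_1000_1001(uint64_t freq)
{
	return pixel_freq_1000_1001(freq / 10) * 10;
}

int main(void)
{
	uint64_t vclk = 148500000ULL;  /* 148.5 MHz pixel clock */
	uint64_t phy  = 1485000000ULL; /* 1.485 GHz TMDS/PHY clock */

	printf("vclk: %" PRIu64 " -> %" PRIu64 " Hz\n",
	       vclk, pixel_freq_1000_1001(vclk));
	printf("phy:  %" PRIu64 " -> %" PRIu64 " Hz\n",
	       phy, phy_freq_1000_1001(phy));
	return 0;
}

For a 148.5 MHz pixel clock this prints 148351648 Hz, and the matching 1.485 GHz PHY clock becomes 1483516480 Hz, still a multiple of 10, which is what the strict/alternate comparisons in meson_vclk_vic_supported_freq() rely on.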
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 60617aaf18dd..7ac55744e574 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -20,17 +20,18 @@ enum {
};
/* 27MHz is the CVBS Pixel Clock */
-#define MESON_VCLK_CVBS 27000
+#define MESON_VCLK_CVBS (27 * 1000 * 1000)
enum drm_mode_status
-meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned long long freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
- unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int phy_freq, unsigned int vclk_freq,
- unsigned int venc_freq, unsigned int dac_freq,
+ unsigned long long phy_freq, unsigned long long vclk_freq,
+ unsigned long long venc_freq, unsigned long long dac_freq,
bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 7b56da24711e..eca9c7d4ec6f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2539,6 +2539,38 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+ struct drm_encoder *curr;
+ int type = drm_enc->encoder_type;
+ uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+ /*
+ * Set writeback as possible clones of real-time DSI encoders and vice
+ * versa
+ *
+ * Writeback encoders can't be clones of each other and DSI
+ * encoders can't be clones of each other.
+ *
+ * TODO: Add DP encoders as valid possible clones for writeback encoders
+ * (and vice versa) once concurrent writeback has been validated for DP
+ */
+ drm_for_each_encoder(curr, drm_enc->dev) {
+ if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+ curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+ (type == DRM_MODE_ENCODER_DSI &&
+ curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+ clone_mask |= drm_encoder_mask(curr);
+ }
+
+ return clone_mask;
+}
+
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index da133ee4701a..751be231ee7b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -60,6 +60,8 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc);
+
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
int drm_enc_mode,
struct msm_display_info *disp_info);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 8741dc6fc8dd..b8f4ebba8ac2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013 Red Hat
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -834,8 +834,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
return ret;
num_encoders = 0;
- drm_for_each_encoder(encoder, dev)
+ drm_for_each_encoder(encoder, dev) {
num_encoders++;
+ if (catalog->cwb_count > 0)
+ encoder->possible_clones = dpu_encoder_get_clones(encoder);
+ }
max_crtc_count = min(catalog->mixer_count, num_encoders);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7cc84472cece..edddfc036c6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- if (error)
+ if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 58502102926b..bb86b6d4ca49 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -61,7 +61,7 @@
extern struct dentry *nouveau_debugfs_root;
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
-#define GSP_MSG_MAX_SIZE GSP_PAGE_MIN_SIZE * 16
+#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
struct r535_gsp_msg {
u8 auth_tag_buffer[16];
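The GSP_MSG_MAX_SIZE change fixes a stray GSP_PAGE_MIN_SIZE reference and, just as importantly, parenthesizes the expansion: an unparenthesized function-style constant inherits the precedence of whatever expression it lands in. The hazard in miniature, with an illustrative page size:

#include <stdio.h>

#define PAGE_SZ 4096

#define MAX_SIZE_BAD   PAGE_SZ * 16   /* expands un-parenthesized */
#define MAX_SIZE_GOOD  (PAGE_SZ * 16)

int main(void)
{
	int total = 131072; /* two max-size messages */

	/* total / PAGE_SZ * 16 == (total / 4096) * 16 == 512, not 2. */
	printf("bad:  %d\n", total / MAX_SIZE_BAD);
	printf("good: %d\n", total / MAX_SIZE_GOOD);
	return 0;
}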
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f8511fe5fb0d..b0315d3ba00a 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1993,6 +1993,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
+ EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
	{ /* sentinel */ }
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 7d68a8acfe2e..eb0f8373258c 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -129,11 +129,11 @@ static int jadard_unprepare(struct drm_panel *panel)
{
struct jadard *jadard = panel_to_jadard(panel);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(120);
if (jadard->desc->reset_before_power_off_vcioo) {
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
usleep_range(1000, 2000);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9b2f128fd309..cf9ab2d1f1d2 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
-static const struct drm_display_mode auo_g101evn010_mode = {
- .clock = 68930,
- .hdisplay = 1280,
- .hsync_start = 1280 + 82,
- .hsync_end = 1280 + 82 + 2,
- .htotal = 1280 + 82 + 2 + 84,
- .vdisplay = 800,
- .vsync_start = 800 + 8,
- .vsync_end = 800 + 8 + 2,
- .vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+ .pixelclock = { 64000000, 68930000, 85000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 8, 64, 256 },
+ .hback_porch = { 8, 64, 256 },
+ .hsync_len = { 40, 168, 767 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 8, 100 },
+ .vback_porch = { 4, 8, 100 },
+ .vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
- .modes = &auo_g101evn010_mode,
- .num_modes = 1,
+ .timings = &auo_g101evn010_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 17a98845fd31..bcbd49882392 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -159,6 +159,7 @@ struct vop2_video_port {
struct drm_crtc crtc;
struct vop2 *vop2;
struct clk *dclk;
+ struct clk *dclk_src;
unsigned int id;
const struct vop2_video_port_data *data;
@@ -214,6 +215,7 @@ struct vop2 {
struct clk *hclk;
struct clk *aclk;
struct clk *pclk;
+ struct clk *pll_hdmiphy0;
/* optional internal rgb encoder */
struct rockchip_rgb *rgb;
@@ -222,6 +224,8 @@ struct vop2 {
struct vop2_win win[];
};
+#define VOP2_MAX_DCLK_RATE 600000000
+
#define vop2_output_if_is_hdmi(x) ((x) == ROCKCHIP_VOP2_EP_HDMI0 || \
(x) == ROCKCHIP_VOP2_EP_HDMI1)
@@ -1155,6 +1159,9 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc,
vop2_crtc_disable_irq(vp, VP_INT_DSP_HOLD_VALID);
+ if (vp->dclk_src)
+ clk_set_parent(vp->dclk, vp->dclk_src);
+
clk_disable_unprepare(vp->dclk);
vop2->enable_count--;
@@ -1547,10 +1554,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
rb_swap = vop2_win_rb_swap(fb->format->format);
vop2_win_write(win, VOP2_WIN_RB_SWAP, rb_swap);
- if (!vop2_cluster_window(win)) {
- uv_swap = vop2_win_uv_swap(fb->format->format);
- vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
- }
+ uv_swap = vop2_win_uv_swap(fb->format->format);
+ vop2_win_write(win, VOP2_WIN_UV_SWAP, uv_swap);
if (fb->format->is_yuv) {
vop2_win_write(win, VOP2_WIN_UV_VIR, DIV_ROUND_UP(fb->pitches[1], 4));
@@ -2259,6 +2264,27 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
vop2_vp_write(vp, RK3568_VP_MIPI_CTRL, 0);
+ /*
+ * Switch to HDMI PHY PLL as DCLK source for display modes up
+ * to 4K@60Hz, if available, otherwise keep using the system CRU.
+ */
+ if (vop2->pll_hdmiphy0 && clock <= VOP2_MAX_DCLK_RATE) {
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI0 PHY PLL: %d\n", ret);
+ break;
+ }
+ }
+ }
+
clk_set_rate(vp->dclk, clock);
vop2_post_config(crtc);
@@ -3699,6 +3725,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(vop2->pclk);
}
+ vop2->pll_hdmiphy0 = devm_clk_get_optional(vop2->dev, "pll_hdmiphy0");
+ if (IS_ERR(vop2->pll_hdmiphy0)) {
+ drm_err(vop2->drm, "failed to get pll_hdmiphy0\n");
+ return PTR_ERR(vop2->pll_hdmiphy0);
+ }
+
vop2->irq = platform_get_irq(pdev, 0);
if (vop2->irq < 0) {
drm_err(vop2->drm, "cannot find irq for vop2\n");
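The enable path records the original dclk parent the first time it reparents onto the HDMI PHY PLL, and the disable path restores it, so a later mode that cannot use the PLL falls back to the CRU-provided source. A structural sketch of that save/restore with toy clock objects (the real calls go through the common clock framework):

#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
};

static struct clk *clk_get_parent(struct clk *c)          { return c->parent; }
static void clk_set_parent(struct clk *c, struct clk *p)  { c->parent = p; }

int main(void)
{
	struct clk cru_pll  = { "cru_dclk_src", NULL };
	struct clk hdmi_pll = { "pll_hdmiphy0", NULL };
	struct clk dclk     = { "dclk_vp0", &cru_pll };
	struct clk *dclk_src = NULL;

	/* enable path: remember the original parent once, then reparent */
	if (!dclk_src)
		dclk_src = clk_get_parent(&dclk);
	clk_set_parent(&dclk, &hdmi_pll);
	printf("enabled:  dclk parent = %s\n", clk_get_parent(&dclk)->name);

	/* disable path: restore the saved parent */
	if (dclk_src)
		clk_set_parent(&dclk, dclk_src);
	printf("disabled: dclk parent = %s\n", clk_get_parent(&dclk)->name);
	return 0;
}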
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index fd4215e2f982..925fbc2cda70 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
KUNIT_EXPECT_NULL(test, shmem->sgt);
+ ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
KUNIT_ASSERT_EQ(test, ret, 0);
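KUnit deferred actions run last-in-first-out, so registering the kfree action for the sg_table before the sg_free_table action means teardown frees the table's internal allocations first and the struct itself last. A tiny LIFO-cleanup model of that ordering:

#include <stdio.h>

#define MAX_ACTIONS 8

typedef void (*action_fn)(const char *);

static struct { action_fn fn; const char *arg; } stack[MAX_ACTIONS];
static int top;

/* Analog of kunit_add_action(): push a deferred cleanup. */
static void add_action(action_fn fn, const char *arg)
{
	stack[top].fn = fn;
	stack[top].arg = arg;
	top++;
}

/* Cleanups run in reverse registration order (LIFO). */
static void run_actions(void)
{
	while (top > 0) {
		top--;
		stack[top].fn(stack[top].arg);
	}
}

static void do_cleanup(const char *what) { printf("cleanup: %s\n", what); }

int main(void)
{
	add_action(do_cleanup, "kfree(sgt)");         /* registered first ... */
	add_action(do_cleanup, "sg_free_table(sgt)"); /* ... so this runs first */
	run_actions();
	return 0;
}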
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 0460ecaef4bd..23914a9f7fd3 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -390,7 +390,10 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- drm_client_setup(drm, NULL);
+ if (bpp == 16)
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB565);
+ else
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_RGB888);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ea5e49858857..72c675191a02 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1092,7 +1092,8 @@ struct ttm_bo_swapout_walk {
struct ttm_lru_walk walk;
/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
gfp_t gfp_flags;
-
+	/** @hit_low: Whether to attempt swapping BOs with the low watermark threshold */
+	/** @evict_low: Whether to evict BOs that could not be swapped while @hit_low was false (first pass) */
bool hit_low, evict_low;
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 930737a9347b..852015214e97 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -295,11 +295,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ v3d->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(v3d->clk))
+ return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n");
+
+ ret = clk_prepare_enable(v3d->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the V3D clock\n");
+ return ret;
+ }
+
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH));
ret = dma_set_mask_and_coherent(dev, mask);
if (ret)
- return ret;
+ goto clk_disable;
v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH);
@@ -319,28 +329,29 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto clk_disable;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- return ret;
+ goto clk_disable;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- return ret;
+ goto clk_disable;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_disable;
}
ret = v3d_gem_init(drm);
@@ -369,6 +380,8 @@ gem_destroy:
v3d_gem_destroy(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+clk_disable:
+ clk_disable_unprepare(v3d->clk);
return ret;
}
@@ -386,6 +399,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev)
dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
v3d->mmu_scratch_paddr);
+
+ clk_disable_unprepare(v3d->clk);
}
static struct platform_driver v3d_platform_driver = {
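Because the V3D clock is now enabled early in probe, every later failure path has to disable it again, hence the new clk_disable label that the former return ret sites were rewired to. The goto-unwind idiom in isolation, with stub acquire/release functions:

#include <stdio.h>

static int enable_clock(void)   { printf("clock on\n");  return 0; }
static void disable_clock(void) { printf("clock off\n"); }
static int map_registers(void)  { return -1; /* simulate failure */ }

static int probe(void)
{
	int ret;

	ret = enable_clock();
	if (ret)
		return ret;

	ret = map_registers();
	if (ret)
		goto clk_disable; /* unwind everything acquired so far */

	return 0;

clk_disable:
	disable_clock();
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}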
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 6db503a56918..78ebc7d54a0c 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -746,11 +746,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
+}
+
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -760,9 +765,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
+ /* If the current address or return address has changed, then the GPU
+ * has probably made progress and we should delay the reset. This
+ * could fail if the GPU got stuck in an infinite loop in the CL, but
+ * that is pretty unlikely outside of an i-g-t testcase.
+ */
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -802,11 +814,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
- /* If we've made progress, skip reset and let the timer get
- * rearmed.
+ /* If we've made progress, skip reset, add the job to the pending
+ * list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
+
+ v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
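
The v3d change above illustrates a general drm_sched technique: a timedout callback that detects forward progress can put the job back on the scheduler's pending list so the timeout timer is rearmed instead of triggering a full GPU reset. A minimal sketch under assumptions, for a hypothetical driver with a cached progress counter (example_job, read_hw_progress() and example_gpu_reset() are illustrative, not real API):

static enum drm_gpu_sched_stat
example_job_timedout(struct drm_sched_job *sched_job)
{
	struct example_job *job = to_example_job(sched_job);
	u32 progress = read_hw_progress(job);	/* e.g. a HW counter read */

	if (job->timedout_progress != progress) {
		/* HW advanced since the last timeout: skip the reset. */
		job->timedout_progress = progress;

		spin_lock(&sched_job->sched->job_list_lock);
		list_add(&sched_job->list, &sched_job->sched->pending_list);
		spin_unlock(&sched_job->sched->job_list_lock);

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	return example_gpu_reset(job);	/* genuinely stuck: reset */
}
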
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 6a67c6297d58..8719b778a1ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -125,6 +125,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ /*
+ * drm does its own synchronization on shutdown.
+ * Do nothing here, opt out of device reset.
+ */
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -159,6 +167,7 @@ static struct virtio_driver virtio_gpu_driver = {
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index b3921dbc52ff..b735e30953ce 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -346,7 +346,8 @@ static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime)
xe_display_flush_cleanup_work(xe);
- intel_hpd_cancel_work(xe);
+ if (!runtime)
+ intel_hpd_cancel_work(xe);
if (!runtime && has_display(xe)) {
intel_display_driver_suspend_access(display);
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index a255946b6f77..8cfcd3360896 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 10ec2920d31b..d4033278be9f 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -47,6 +47,10 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
+#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
+#define MI_SRM_USE_GGTT REG_BIT(22)
+#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
+
#define MI_FLUSH_DW __MI_INSTR(0x26)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
#define MI_INVALIDATE_TLB REG_BIT(18)
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 162f18e975da..ab95d3545a72 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -157,6 +157,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
@@ -475,6 +476,7 @@
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
#define STK_ID_RESTRICT REG_BIT(12)
#define SLM_WMTP_RESTORE REG_BIT(11)
+#define RES_CHK_SPR_DIS REG_BIT(6)
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
@@ -500,6 +502,9 @@
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
+#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
+#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define WR_REQ_CHAINING_DIS REG_BIT(26)
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index ef1e5256c56a..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt,
unsigned int fw_ref, i;
u32 reg_val;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, false, "Forcewake Failed.\n");
+ }
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 36a3b5420fef..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test)
count_rtp_entries++;
xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 3f5391d416d4..2070aa12059c 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -713,6 +713,21 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
+ /* Reject BO eviction if BO is bound to current VM. */
+ if (evict && ctx->resv) {
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, &bo->ttm.base) {
+ struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
+
+ if (xe_vm_resv(vm) == ctx->resv &&
+ xe_vm_in_preempt_fence_mode(vm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+ }
+
/*
* Failed multi-hop where the old_mem is still marked as
* TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
@@ -2142,6 +2157,7 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
struct xe_vm *vm = NULL;
+ ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
@@ -2214,6 +2230,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
vm = xe_vm_lookup(xef, args->vm_id);
if (XE_IOCTL_DBG(xe, !vm))
return -ENOENT;
+ }
+
+retry:
+ if (vm) {
err = xe_vm_lock(vm, true);
if (err)
goto out_vm;
@@ -2227,6 +2247,8 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
goto out_vm;
}
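
The retry added to xe_gem_create_ioctl() follows a time-bounded pattern: the first transient failure arms a deadline in *end, and later failures keep retrying until that deadline expires. A minimal sketch of the same idea, assuming a 1 s budget and that only transient errors are retried (should_retry() and the exact error set are illustrative):

static bool should_retry(int err, ktime_t *end)
{
	/* Only transient errors are worth retrying. */
	if (err != -ENOMEM && err != -EBUSY)
		return false;

	/* First failure: arm the deadline. */
	if (!*end) {
		*end = ktime_add_ms(ktime_get(), 1000);
		return true;
	}

	/* Later failures: retry until the deadline passes. */
	return ktime_before(ktime_get(), *end);
}
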
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 492b4877433f..92e6fa8fe3a1 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -166,7 +166,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
return -EINVAL;
if (xe->wedged.mode == wedged_mode)
- return 0;
+ return size;
xe->wedged.mode = wedged_mode;
@@ -175,6 +175,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
if (ret) {
xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
+ xe_pm_runtime_put(xe);
return -EIO;
}
}
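
The wedged_mode fix relies on the write-handler convention that the return value is the number of bytes consumed: returning 0 from a .write implementation makes userspace retry the same write indefinitely. A minimal sketch, assuming a hypothetical u32 setting (example_val and example_set() are illustrative):

static u32 example_val;

static ssize_t example_set(struct file *f, const char __user *ubuf,
			   size_t size, loff_t *pos)
{
	u32 val;
	int ret;

	ret = kstrtou32_from_user(ubuf, size, 0, &val);
	if (ret)
		return ret;

	/* Already at the requested value: still consume all bytes. */
	if (example_val == val)
		return size;

	example_val = val;
	return size;
}
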
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4e1839b483a0..74516e73ba4e 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -722,7 +722,9 @@ int xe_device_probe(struct xe_device *xe)
}
/* Allocate and map stolen after potential VRAM resize */
- xe_ttm_stolen_mgr_init(xe);
+ err = xe_ttm_stolen_mgr_init(xe);
+ if (err)
+ return err;
/*
* Now that GT is initialized (TTM in particular),
@@ -734,6 +736,12 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err;
+ for_each_tile(tile, xe, id) {
+ err = xe_tile_init(tile);
+ if (err)
+ goto err;
+ }
+
for_each_gt(gt, xe, id) {
last_gt = id;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 2d4874d2b922..31f688e953d7 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -325,6 +325,14 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
unsigned int fw_ref;
/*
+ * RING_TIMESTAMP registers are inaccessible in VF mode.
+ * Without drm-total-cycles-*, other keys provide little value.
+ * Show all or none of the optional "run_ticks" keys in this case.
+ */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /*
* Wait for any exec queue going away: their cycles will get updated on
* context switch out, so wait for that to happen
*/
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 904cf47925aa..ed9183599e31 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -28,10 +28,10 @@
"\n" \
"#endif\n"
-static void print_usage(FILE *f)
+static void print_usage(FILE *f, const char *progname)
{
fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
- program_invocation_short_name);
+ progname);
}
static void print_parse_error(const char *err_msg, const char *line,
@@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
- print_usage(stderr);
+ print_usage(stderr, argv[0]);
return 1;
}
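
The xe_gen_wa_oob change drops the GNU-only program_invocation_short_name in favor of passing argv[0] through. If the short name is still wanted without the glibc extension, it can be derived portably; a sketch (short_name() is an illustrative helper):

#include <string.h>

static const char *short_name(const char *argv0)
{
	const char *slash = strrchr(argv0, '/');

	/* Keep only the component after the last '/', if any. */
	return slash ? slash + 1 : argv0;
}
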
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 1eb791ddc375..0f11b1c00960 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -564,6 +564,28 @@ void xe_gsc_remove(struct xe_gsc *gsc)
xe_gsc_proxy_remove(gsc);
}
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+
+ /*
+ * If the GSC FW load or the proxy init is interrupted, the only way
+ * to recover is to do an FLR and reload the GSC from scratch.
+ * Therefore, let's wait for the init to complete before stopping
+ * operations. The proxy init is the last step, so we can just wait on
+ * that.
+ */
+ ret = xe_gsc_wait_for_proxy_init_done(gsc);
+ if (ret)
+ xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
+}
+
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a
* GSC engine reset by writing a notification bit in the GS1 register and then
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index e282b9ef6ec4..c31fe24c4b66 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -16,6 +16,7 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 24cc6a4f9a96..76636da3d06c 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* Proxy init can take up to 500ms, so wait double that for safety */
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_CURRENT_STATE,
+ HECI1_FWSTS1_PROXY_STATE_NORMAL,
+ USEC_PER_SEC, NULL, false);
+}
+
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index c511ade6b863..e2498aa6de18 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -13,6 +13,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
void xe_gsc_proxy_remove(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 8a20e6744836..6b4b9eca2c38 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -381,6 +381,10 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_tuning_init(gt);
+ if (err)
+ return err;
+
xe_wa_process_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
@@ -646,6 +650,9 @@ void xe_gt_mmio_init(struct xe_gt *gt)
if (gt->info.type == XE_GT_TYPE_MEDIA) {
gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+ } else {
+ gt->mmio.adj_offset = 0;
+ gt->mmio.adj_limit = 0;
}
if (IS_SRIOV_VF(gt_to_xe(gt)))
@@ -858,7 +865,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- xe_uc_stop_prepare(&gt->uc);
+ xe_uc_suspend_prepare(&gt->uc);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index e7792858b1e4..f7005a3643e6 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -30,6 +30,7 @@
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_sriov.h"
+#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -91,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
+ int ret = 0;
xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_pm_runtime_put(xe);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
+fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- return 0;
+ return ret;
}
static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
@@ -217,6 +219,15 @@ static int workarounds(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int tunings(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_tuning_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static int pat(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -300,6 +311,7 @@ static const struct drm_info_list debugfs_list[] = {
{"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
+ {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
{"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
{"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index ffd3ba7f6656..fbbace7b0b12 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -69,6 +69,8 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
{
u64 delta, overflow_residency, prev_residency;
+ lockdep_assert_held(&gtidle->lock);
+
overflow_residency = BIT_ULL(32);
/*
@@ -275,8 +277,21 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
-static DEVICE_ATTR_RO(idle_status);
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
+{
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ u64 residency;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gtidle->lock, flags);
+ residency = get_residency_ms(gtidle, gtidle->idle_residency(pc));
+ raw_spin_unlock_irqrestore(&gtidle->lock, flags);
+
+ return residency;
+}
+
+static DEVICE_ATTR_RO(idle_status);
static ssize_t idle_residency_ms_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
@@ -285,10 +300,10 @@ static ssize_t idle_residency_ms_show(struct device *dev,
u64 residency;
xe_pm_runtime_get(pc_to_xe(pc));
- residency = gtidle->idle_residency(pc);
+ residency = xe_gt_idle_residency_msec(gtidle);
xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
+ return sysfs_emit(buff, "%llu\n", residency);
}
static DEVICE_ATTR_RO(idle_residency_ms);
@@ -331,6 +346,8 @@ int xe_gt_idle_init(struct xe_gt_idle *gtidle)
if (!kobj)
return -ENOMEM;
+ raw_spin_lock_init(&gtidle->lock);
+
if (xe_gt_is_media_type(gt)) {
snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
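
The new lock serializes get_residency_ms(), whose wraparound accounting is only correct if prev_residency is not updated concurrently: the hardware counter is 32 bits wide, so a sample smaller than the previous one means the counter wrapped exactly once. A sketch of that accounting, assuming a 32-bit counter and a caller that holds the lock (accumulate_residency() is illustrative):

static u64 accumulate_residency(u64 *total, u64 *prev, u64 sample)
{
	u64 delta;

	if (sample < *prev)	/* 32-bit counter wrapped once */
		delta = sample + BIT_ULL(32) - *prev;
	else
		delta = sample - *prev;

	*prev = sample;
	*total += delta;

	return *total;
}
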
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 4455a6501cb0..591a01e181bc 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -17,5 +17,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle);
#endif /* _XE_GT_IDLE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h
index b8b297a3f884..a3667c567f8a 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_GT_IDLE_SYSFS_TYPES_H_
#define _XE_GT_IDLE_SYSFS_TYPES_H_
+#include <linux/spinlock.h>
#include <linux/types.h>
struct xe_guc_pc;
@@ -31,6 +32,8 @@ struct xe_gt_idle {
u64 cur_residency;
/** @prev_residency: previous residency counter */
u64 prev_residency;
+ /** @lock: Lock protecting idle residency counters */
+ raw_spinlock_t lock;
/** @idle_status: get the current idle state */
enum xe_gt_idle_state (*idle_status)(struct xe_guc_pc *pc);
/** @idle_residency: get idle residency counter */
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 2606cd396df5..0d0207be93ed 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -422,9 +422,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
- /* user can issue separate page faults per EU and per CS */
+ /*
+ * The user can issue separate page faults per EU and per CS.
+ *
+ * XXX: A multiplier is required as compute UMDs are hitting PF queue
+ * errors without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 6f906c8e8108..c08efca6420e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -15,7 +15,11 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
+
+static void pf_worker_restart_func(struct work_struct *w);
/*
* VF's metadata is maintained in the flexible array where:
@@ -41,6 +45,11 @@ static int pf_alloc_metadata(struct xe_gt *gt)
return 0;
}
+static void pf_init_workers(struct xe_gt *gt)
+{
+ INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -65,6 +74,8 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ pf_init_workers(gt);
+
return 0;
}
@@ -78,6 +89,12 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
*/
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
+ int err;
+
+ err = xe_gt_sriov_pf_config_init(gt);
+ if (err)
+ return err;
+
return xe_gt_sriov_pf_migration_init(gt);
}
@@ -155,6 +172,35 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
pf_clear_vf_scratch_regs(gt, vfid);
}
+static void pf_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_sriov_pf_config_restart(gt);
+ xe_gt_sriov_pf_control_restart(gt);
+ xe_pm_runtime_put(xe);
+
+ xe_gt_sriov_dbg(gt, "restart completed\n");
+}
+
+static void pf_worker_restart_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);
+
+ pf_restart(gt);
+}
+
+static void pf_queue_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+}
+
/**
* xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
* @gt: the &xe_gt
@@ -163,6 +209,5 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
*/
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
- xe_gt_sriov_pf_config_restart(gt);
- xe_gt_sriov_pf_control_restart(gt);
+ pf_queue_restart(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 4bd255adfb40..27f309e3a76e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -336,6 +336,26 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
return err;
}
+static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (reset)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
+
+ return err;
+}
+
+static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_push_vf_cfg(gt, vfid, true);
+}
+
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -432,6 +452,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
return err;
pf_release_vf_config_ggtt(gt, config);
+
+ err = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(err))
+ return err;
}
xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
@@ -757,6 +781,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx
return ret;
pf_release_config_ctxs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_ctxs)
@@ -1054,6 +1082,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
return ret;
pf_release_config_dbs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_dbs)
@@ -2085,10 +2117,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh
xe_gt_assert(gt, vfid);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- if (refresh)
- err = pf_send_vf_cfg_reset(gt, vfid);
- if (!err)
- err = pf_push_full_vf_config(gt, vfid);
+ err = pf_push_vf_cfg(gt, vfid, refresh);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err)) {
@@ -2320,6 +2349,35 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static void fini_config(void *arg)
+{
+ struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = 1; n <= total_vfs; n++)
+ pf_release_vf_config(gt, n);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
+}
+
/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index f894e9d4abba..513e6512a575 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -63,6 +63,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 0426b1a77069..a64a6835ad65 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -36,7 +36,16 @@ struct xe_gt_sriov_metadata {
};
/**
+ * struct xe_gt_sriov_pf_workers - GT level workers used by the PF.
+ */
+struct xe_gt_sriov_pf_workers {
+ /** @restart: worker that executes actions post GT reset */
+ struct work_struct restart;
+};
+
+/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @workers: workers data.
* @service: service data.
* @control: control data.
* @policy: policy data.
@@ -45,6 +54,7 @@ struct xe_gt_sriov_metadata {
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_workers workers;
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 9c30cbd9af6e..a439261bf4d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -47,12 +47,19 @@ static int guc_action_vf_reset(struct xe_guc *guc)
return ret > 0 ? -EPROTO : ret;
}
+#define GUC_RESET_VF_STATE_RETRY_MAX 10
static int vf_reset_guc_state(struct xe_gt *gt)
{
+ unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
struct xe_guc *guc = &gt->uc.guc;
int err;
- err = guc_action_vf_reset(guc);
+ do {
+ err = guc_action_vf_reset(guc);
+ if (!err || err != -ETIMEDOUT)
+ break;
+ } while (--retry);
+
if (unlikely(err))
xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
return err;
@@ -229,6 +236,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
int err;
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
err = vf_reset_guc_state(gt);
if (unlikely(err))
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 9405d83d4db2..084cbdeba8ea 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -419,6 +419,28 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
}
/**
+ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
+ * @gt: GT structure
+ * @vm: VM to invalidate
+ *
+ * Invalidate the entire VM's address space
+ */
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
+{
+ struct xe_gt_tlb_invalidation_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+
+ ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
+ if (ret < 0)
+ return;
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+}
+
+/**
* xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
* @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index 672acfcdf0d7..abe9b03d543e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -12,6 +12,7 @@
struct xe_gt;
struct xe_guc;
+struct xe_vm;
struct xe_vma;
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
@@ -21,6 +22,7 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 6e66bf0e8b3f..dd2969a1846d 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -413,6 +413,16 @@ struct xe_gt {
bool oob_initialized;
} wa_active;
+ /** @tuning_active: keep track of active tunings */
+ struct {
+ /** @tuning_active.gt: bitmap with active GT tunings */
+ unsigned long *gt;
+ /** @tuning_active.engine: bitmap with active engine tunings */
+ unsigned long *engine;
+ /** @tuning_active.lrc: bitmap with active LRC tunings */
+ unsigned long *lrc;
+ } tuning_active;
+
/** @user_engines: engines present in GT and available to userspace */
struct {
/**
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index f6d523e4c5fe..9095618648bc 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -359,7 +359,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
ext->reg = XE_REG(extlist->reg.__reg.addr);
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
- ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
ext->regname = extlist->name;
}
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index 0ca3056d8bd3..80514a446ba2 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -149,16 +149,12 @@ struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log,
size_t remain;
int i;
- if (!log->bo) {
- xe_gt_err(gt, "GuC log buffer not allocated\n");
+ if (!log->bo)
return NULL;
- }
snapshot = xe_guc_log_snapshot_alloc(log, atomic);
- if (!snapshot) {
- xe_gt_err(gt, "GuC log snapshot not allocated\n");
+ if (!snapshot)
return NULL;
- }
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index f382f5d53ca8..2276d85926fc 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -371,16 +371,17 @@ static void tgl_update_rpa_value(struct xe_guc_pc *pc)
u32 reg;
/*
- * For PVC we still need to use fused RP1 as the approximation for RPe
- * For other platforms than PVC we get the resolved RPe directly from
+ * For PVC we still need to use fused RP0 as the approximation for RPa.
+ * For platforms other than PVC we get the resolved RPa directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC) {
reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- else
+ pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
-
- pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
}
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
@@ -394,12 +395,13 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc)
* For other platforms than PVC we get the resolved RPe directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC) {
reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- else
+ pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
-
- pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
}
static void pc_update_rp_values(struct xe_guc_pc *pc)
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index 8f62de026724..e5dc94f3e618 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -225,7 +225,7 @@ __relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u
* with CTB lock held which is marked as used in the reclaim path.
* Btw, that's one of the reason why we use mempool here!
*/
- txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
+ txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_NOWAIT);
if (!txn)
return ERR_PTR(-ENOMEM);
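
The GFP_KERNEL to GFP_NOWAIT switch matters because, per the comment above it, the allocation can run with the CTB lock held and that lock is marked as used in the reclaim path: a GFP_KERNEL allocation may enter direct reclaim and try to take the same lock again. GFP_NOWAIT never sleeps or reclaims, trading a possible allocation failure for deadlock safety. A sketch of the hazard, with illustrative names (ct_lock, pool):

static int example_send(void)
{
	void *obj;

	spin_lock(&ct_lock);	/* lock also taken in the reclaim path */

	/* GFP_KERNEL here could enter reclaim and deadlock on ct_lock. */
	obj = mempool_alloc(&pool, GFP_NOWAIT);
	if (!obj) {
		spin_unlock(&ct_lock);
		return -ENOMEM;	/* caller must tolerate failure */
	}

	/* ... fill and send obj ... */
	spin_unlock(&ct_lock);
	return 0;
}
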
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index fc447751fe78..b26b6fb5cdb5 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -386,12 +386,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -400,10 +394,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
},
- {}
};
- xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}
static void
@@ -459,10 +452,15 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
- xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
}
static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index bbb9ffbf6367..5d7629bb6b8d 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -684,7 +684,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
- /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ /* The context job timestamp is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@@ -864,7 +864,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
- u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+ u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@@ -895,6 +895,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
int err;
kref_init(&lrc->refcount);
+ lrc->gt = gt;
lrc->flags = 0;
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
@@ -913,7 +914,6 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
return PTR_ERR(lrc->bo);
lrc->size = lrc_size;
- lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
lrc->ctx_timestamp = 0;
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 71ecb453f811..cd38586ae989 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -25,8 +25,8 @@ struct xe_lrc {
/** @size: size of lrc including any indirect ring state page */
u32 size;
- /** @tile: tile which this LRC belongs to */
- struct xe_tile *tile;
+ /** @gt: gt which this LRC belongs to */
+ struct xe_gt *gt;
/** @flags: LRC flags */
#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index a48f239cad1c..9c2f60ce0c94 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -76,12 +76,12 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
* is fine as it's going to the root tile's mmio, that's
* guaranteed to be initialized earlier in xe_mmio_init()
*/
- mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
+ xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
@@ -173,7 +173,7 @@ int xe_mmio_init(struct xe_device *xe)
*/
xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
- if (xe->mmio.regs == NULL) {
+ if (!xe->mmio.regs) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
@@ -338,8 +338,8 @@ u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
return (u64)udw << 32 | ldw;
}
-static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic, bool expect_match)
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eb6cd91e1e22..abf37d9ab221 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -548,6 +548,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
mutex_unlock(&stream->stream_lock);
} while (!offset && !ret);
} else {
+ xe_oa_buffer_check_unlocked(stream);
mutex_lock(&stream->stream_lock);
ret = __xe_oa_read(stream, buf, count, &offset);
mutex_unlock(&stream->stream_lock);
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 39be74848e44..d92b2e5885b9 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -150,7 +150,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
};
static const struct xe_graphics_desc graphics_xelpg = {
- .name = "Xe_LPG",
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0),
@@ -174,8 +173,6 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG / Xe2_HPG / Xe3_LPG",
-
XE2_GFX_FEATURES,
};
@@ -200,15 +197,6 @@ static const struct xe_media_desc media_xehpm = {
};
static const struct xe_media_desc media_xelpmp = {
- .name = "Xe_LPM+",
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
- BIT(XE_HW_ENGINE_GSCCS0)
-};
-
-static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM / Xe2_HPM / Xe3_LPM",
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
@@ -357,21 +345,21 @@ __diag_pop();
/* Map of GMD_ID values to graphics IP */
static const struct gmdid_map graphics_ip_map[] = {
- { 1270, &graphics_xelpg },
- { 1271, &graphics_xelpg },
- { 1274, &graphics_xelpg }, /* Xe_LPG+ */
- { 2001, &graphics_xe2 },
- { 2004, &graphics_xe2 },
- { 3000, &graphics_xe2 },
- { 3001, &graphics_xe2 },
+ { 1270, "Xe_LPG", &graphics_xelpg },
+ { 1271, "Xe_LPG", &graphics_xelpg },
+ { 1274, "Xe_LPG+", &graphics_xelpg },
+ { 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2004, "Xe2_LPG", &graphics_xe2 },
+ { 3000, "Xe3_LPG", &graphics_xe2 },
+ { 3001, "Xe3_LPG", &graphics_xe2 },
};
/* Map of GMD_ID values to media IP */
static const struct gmdid_map media_ip_map[] = {
- { 1300, &media_xelpmp },
- { 1301, &media_xe2 },
- { 2000, &media_xe2 },
- { 3000, &media_xe2 },
+ { 1300, "Xe_LPM+", &media_xelpmp },
+ { 1301, "Xe2_HPM", &media_xelpmp },
+ { 2000, "Xe2_LPM", &media_xelpmp },
+ { 3000, "Xe3_LPM", &media_xelpmp },
};
/*
@@ -502,6 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
gt->info.type = XE_GT_TYPE_MAIN;
}
+ xe_gt_mmio_init(gt);
xe_guc_comm_init_early(&gt->uc.guc);
/* Don't bother with GMDID if failed to negotiate the GuC ABI */
@@ -566,6 +555,7 @@ static void handle_gmdid(struct xe_device *xe,
for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
if (ver == graphics_ip_map[i].ver) {
xe->info.graphics_verx100 = ver;
+ xe->info.graphics_name = graphics_ip_map[i].name;
*graphics = graphics_ip_map[i].ip;
break;
@@ -586,6 +576,7 @@ static void handle_gmdid(struct xe_device *xe,
for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
if (ver == media_ip_map[i].ver) {
xe->info.media_verx100 = ver;
+ xe->info.media_name = media_ip_map[i].name;
*media = media_ip_map[i].ip;
break;
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index aaceee748287..09ee8a06fe2e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -62,6 +62,55 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
xe_gt_sriov_pf_control_trigger_flr(gt, n);
}
+static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* caller must use pci_dev_put() */
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vf_id));
+}
+
+static void pf_link_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
+ struct device_link *link;
+ struct pci_dev *pdev_vf;
+ unsigned int n;
+
+ /*
+ * When both PF and VF devices are enabled on the host, during system
+ * resume they are resuming in parallel.
+ *
+ * But PF has to complete the provision of VF first to allow any VFs to
+ * successfully resume.
+ *
+ * Create a parent-child device link between PF and VF devices that will
+ * enforce correct resume order.
+ */
+ for (n = 1; n <= num_vfs; n++) {
+ pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
+
+ /* unlikely, something weird is happening, abort */
+ if (!pdev_vf) {
+ xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
+ n, str_plural(num_vfs));
+ break;
+ }
+
+ link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* unlikely and harmless, continue with other VFs */
+ if (!link)
+ xe_sriov_notice(xe, "Failed linking VF%u\n", n);
+
+ pci_dev_put(pdev_vf);
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -92,6 +141,8 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ pf_link_vfs(xe, num_vfs);
+
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
return num_vfs;
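
pf_link_vfs() above leans on the driver core's device links for power-management ordering: a consumer device is guaranteed not to resume before its supplier. A minimal sketch of the call, with illustrative device pointers (vf_dev, pf_dev):

static void link_for_resume_order(struct device *vf_dev,
				  struct device *pf_dev)
{
	struct device_link *link;

	/* VF (consumer) will not resume until PF (supplier) has. */
	link = device_link_add(vf_dev, pf_dev,
			       DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		dev_warn(pf_dev, "failed to link VF for resume ordering\n");
}
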
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 79b0f80376a4..665b4447b2eb 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -44,6 +44,7 @@ struct xe_media_desc {
struct gmdid_map {
unsigned int ver;
+ const char *name;
const void *ip;
};
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index dc24baa84092..148d90611eeb 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -219,6 +219,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
}
/**
+ * xe_pt_clear() - Clear a page-table.
+ * @xe: xe device.
+ * @pt: The page-table.
+ *
+ * Clears the page-table by setting it to zero.
+ */
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+
+ xe_map_memset(xe, map, 0, 0, SZ_4K);
+}
+
+/**
* DOC: Pagetable building
*
* Below we use the term "page-table" for both page-directories, containing
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 9ab386431cad..8e43912ae8e9 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -13,6 +13,7 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
+
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index edab5d4e3ba5..23f6c81d9994 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -88,7 +88,6 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
- {}
};
static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
@@ -137,7 +136,8 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
+ &hwe->reg_whitelist);
whitelist_apply_to_hwe(hwe);
}
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 9f327f27c072..349317794768 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -141,7 +141,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
- u32 flags = PIPE_CONTROL_CS_STALL |
+ u32 flags0 = 0;
+ u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -152,11 +153,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
- flags &= ~mask_flags;
+ flags1 &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
+ if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
+ flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
+
+ return emit_pipe_control(dw, i, flags0, flags1,
+ LRC_PPHWSP_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -229,13 +234,10 @@ static u32 get_ppgtt_flag(struct xe_sched_job *job)
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
- dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
- MI_COPY_MEM_MEM_DST_GGTT;
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
- dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
- dw[i++] = 0;
- dw[i++] = MI_NOOP;
return i;
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 7a1c78fdfc92..13bb62d3e615 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe,
* the save-restore argument.
* @ctx: The context for processing the table, with one of device, gt or hwe
* @entries: Table with RTP definitions
+ * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
* @sr: Save-restore struct where matching rules execute the action. This can be
* viewed as the "coalesced view" of multiple the tables. The bits for each
* register set are expected not to collide with previously added entries
@@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe,
*/
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
+ size_t n_entries,
struct xe_reg_sr *sr)
{
const struct xe_rtp_entry_sr *entry;
@@ -259,7 +261,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
if (IS_SRIOV_VF(xe))
return;
- for (entry = entries; entry && entry->name; entry++) {
+ xe_assert(xe, entries);
+
+ for (entry = entries; entry - entries < n_entries; entry++) {
bool match = false;
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 38b9f13bba5e..4fe736a11c42 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
- struct xe_reg_sr *sr);
+ size_t n_entries, struct xe_reg_sr *sr);
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries);
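
The xe_rtp_process_to_sr() signature change converts sentinel-terminated tables into explicitly counted ones: callers drop the trailing {} entry and pass ARRAY_SIZE() instead, so the compiler, not a runtime marker, bounds the walk. A sketch of the counted style, with illustrative names (example_entry, handle_one):

struct example_entry {
	const char *name;
};

static const struct example_entry table[] = {
	{ .name = "first" },
	{ .name = "second" },		/* no {} terminator needed */
};

static void process(const struct example_entry *entries, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)		/* bounded by the count... */
		handle_one(&entries[i]);	/* ...not by a sentinel */
}

/* call site: process(table, ARRAY_SIZE(table)); */
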
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index e055bed7ae55..4e7aba445ebc 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -57,8 +57,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
-
- drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
@@ -72,6 +70,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
}
+ drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
sa_manager);
if (ret)
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 07cf7cfe4abd..377438ea6b83 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -170,17 +170,19 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
if (err)
return err;
+ xe_wa_apply_tile_workarounds(tile);
+
+ return xe_tile_sysfs_init(tile);
+}
+
+int xe_tile_init(struct xe_tile *tile)
+{
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
- xe_wa_apply_tile_workarounds(tile);
-
- err = xe_tile_sysfs_init(tile);
-
return 0;
}
-
void xe_tile_migrate_wait(struct xe_tile *tile)
{
xe_migrate_wait(tile->migrate);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index 1c9e42ade6b0..eb939316d55b 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -12,6 +12,7 @@ struct xe_tile;
int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
+int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d414421f8c13..d9c9d2547aad 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -207,17 +207,16 @@ static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
#endif
}
-void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+int xe_ttm_stolen_mgr_init(struct xe_device *xe)
{
- struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_ttm_stolen_mgr *mgr;
u64 stolen_size, io_size;
int err;
- if (!mgr) {
- drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
- return;
- }
+ mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
if (IS_SRIOV_VF(xe))
stolen_size = 0;
@@ -230,7 +229,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (!stolen_size) {
drm_dbg_kms(&xe->drm, "No stolen memory support\n");
- return;
+ return 0;
}
/*
@@ -246,7 +245,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
io_size, PAGE_SIZE);
if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
- return;
+ return err;
}
drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
@@ -254,6 +253,8 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (io_size)
mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
+
+ return 0;
}
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
index 1777245ff810..8e877d1e839b 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
@@ -12,7 +12,7 @@ struct ttm_resource;
struct xe_bo;
struct xe_device;
-void xe_ttm_stolen_mgr_init(struct xe_device *xe);
+int xe_ttm_stolen_mgr_init(struct xe_device *xe);
int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe);
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 3c78f3d71559..a61a2917590f 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -7,6 +7,8 @@
#include <kunit/visibility.h>
+#include <drm/drm_managed.h>
+
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
@@ -83,8 +85,6 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_RULES(MEDIA_VERSION(2000)),
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_tunings[] = {
@@ -93,7 +93,6 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
- {}
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
@@ -131,15 +130,47 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
},
-
- {}
};
+/**
+ * xe_tuning_init - initialize gt with tunings bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_tuning_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_tunings));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_tunings));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_tunings));
+ total = n_gt + n_engine + n_lrc;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->tuning_active.gt = p;
+ p += n_gt;
+ gt->tuning_active.engine = p;
+ p += n_engine;
+ gt->tuning_active.lrc = p;
+
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_tuning_init, ERRNO); /* See xe_pci_probe() */
+
void xe_tuning_process_gt(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ gt->tuning_active.gt,
+ ARRAY_SIZE(gt_tunings));
+ xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
@@ -147,7 +178,11 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.engine,
+ ARRAY_SIZE(engine_tunings));
+ xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings),
+ &hwe->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
@@ -163,5 +198,25 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.lrc,
+ ARRAY_SIZE(lrc_tunings));
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
+}
+
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
+ drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
+
+ drm_printf(p, "\nEngine Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
+ drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
+
+ drm_printf(p, "\nLRC Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
+ drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
}
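
xe_tuning_init() above makes a single drmm_kzalloc() call and carves it into three bitmaps sized with BITS_TO_LONGS(), so one allocation (and one managed free) covers the gt, engine, and lrc trackers. A standalone sketch of the carving arithmetic, with kcalloc standing in for the drm-managed allocator:

    #include <linux/bitmap.h>
    #include <linux/slab.h>

    /* Carve one zeroed allocation into three bitmaps of na/nb/nc bits.
     * Freeing *a releases all three; *b and *c are interior pointers.
     */
    static int carve_bitmaps(unsigned long **a, unsigned long **b,
                             unsigned long **c, size_t na, size_t nb, size_t nc)
    {
            size_t la = BITS_TO_LONGS(na);
            size_t lb = BITS_TO_LONGS(nb);
            size_t lc = BITS_TO_LONGS(nc);
            unsigned long *p = kcalloc(la + lb + lc, sizeof(*p), GFP_KERNEL);

            if (!p)
                    return -ENOMEM;

            *a = p;
            *b = p + la;
            *c = p + la + lb;
            return 0;
    }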
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index 4f9c3ac3b516..dd0d3ccc9c65 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -6,11 +6,14 @@
#ifndef _XE_TUNING_
#define _XE_TUNING_
+struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
+int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 0d073a9987c2..bb03c524613f 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -241,7 +241,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
- xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -275,6 +275,12 @@ again:
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 506517c11333..ba2937ab94cf 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
void xe_uc_remove(struct xe_uc *uc);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 5956631c0d40..785b8960050b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,6 +8,7 @@
#include <linux/dma-fence-array.h>
#include <linux/nospec.h>
+#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_tt.h>
@@ -1582,9 +1583,40 @@ err_no_resv:
static void xe_vm_close(struct xe_vm *vm)
{
+ struct xe_device *xe = vm->xe;
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
down_write(&vm->lock);
+
vm->size = 0;
+
+ if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /* Wait for pending binds */
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ if (bound) {
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_pt_clear(xe, vm->pt_root[id]);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_tlb_invalidation_vm(gt, vm);
+ }
+ }
+
up_write(&vm->lock);
+
+ if (bound)
+ drm_dev_exit(idx);
}
void xe_vm_close_and_put(struct xe_vm *vm)
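
xe_vm_close() above only issues hardware work (page-table clears and TLB invalidations) inside a drm_dev_enter()/drm_dev_exit() section, so a hot-unplugged device skips the MMIO while the software teardown still runs. A minimal sketch of that guard (touch_hw is a placeholder):

    #include <drm/drm_drv.h>

    static void teardown(struct drm_device *drm)
    {
            int idx;

            /* Returns false once the device has been unplugged. */
            if (drm_dev_enter(drm, &idx)) {
                    /* touch_hw(drm);  hypothetical MMIO work */
                    drm_dev_exit(idx);
            }

            /* software-only cleanup is safe either way */
    }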
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 2553accf8c51..56257430b364 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -279,8 +279,6 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_was[] = {
@@ -613,8 +611,16 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
SMP_FORCE_128B_OVERFETCH))
},
-
- {}
+ { XE_RTP_NAME("14023061436"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
+ },
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -795,6 +801,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
@@ -807,8 +817,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
-
- {}
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -850,7 +858,7 @@ void xe_wa_process_gt(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
ARRAY_SIZE(gt_was));
- xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+ xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
@@ -868,7 +876,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
ARRAY_SIZE(engine_was));
- xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr);
}
/**
@@ -885,7 +893,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
ARRAY_SIZE(lrc_was));
- xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 40438c3d9b72..32d3853b08ec 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -30,8 +30,10 @@
13011645652 GRAPHICS_VERSION(2004)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4cfea399ebab..76be97c5fc2f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -603,6 +603,7 @@ config HID_LOGITECH
tristate "Logitech devices"
depends on USB_HID
depends on LEDS_CLASS
+ depends on LEDS_CLASS_MULTICOLOR
default !EXPERT
help
Support for Logitech devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index e9929c4aa72e..08acc707938d 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -83,6 +83,9 @@ static int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
case ALS_IDX:
privdata->dev_en.is_als_present = false;
break;
+ case SRA_IDX:
+ privdata->dev_en.is_sra_present = false;
+ break;
}
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
@@ -134,9 +137,6 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->sensor_sts[i] = SENSOR_DISABLED;
- if (cl_data->num_hid_devices == 1 && cl_data->sensor_idx[0] == SRA_IDX)
- break;
-
if (cl_data->sensor_idx[i] == SRA_IDX) {
info.sensor_idx = cl_data->sensor_idx[i];
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
@@ -145,8 +145,10 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
(privdata, cl_data->sensor_idx[i], ENABLE_SENSOR);
cl_data->sensor_sts[i] = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
- if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
privdata->dev_en.is_sra_present = true;
+ }
continue;
}
@@ -236,6 +238,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
cleanup:
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] == SRA_IDX)
+ continue;
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 2e96ec6a3073..9a06f9b0e4ef 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -38,6 +38,9 @@ dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type
struct hid_bpf_ops *e;
int ret;
+ if (unlikely(hdev->bpf.destroyed))
+ return ERR_PTR(-ENODEV);
+
if (type >= HID_REPORT_TYPES)
return ERR_PTR(-EINVAL);
@@ -93,6 +96,9 @@ int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
if (rtype >= HID_REPORT_TYPES)
return -EINVAL;
@@ -130,6 +136,9 @@ int dispatch_hid_bpf_output_report(struct hid_device *hdev,
struct hid_bpf_ops *e;
int ret, idx;
+ if (unlikely(hdev->bpf.destroyed))
+ return -ENODEV;
+
idx = srcu_read_lock(&hdev->bpf.srcu);
list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
srcu_read_lock_held(&hdev->bpf.srcu)) {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 288a2b864cc4..1062731315a2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -41,6 +41,10 @@
#define USB_VENDOR_ID_ACTIONSTAR 0x2101
#define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011
+#define USB_VENDOR_ID_ADATA_XPG 0x125f
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505
+#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506
+
#define USB_VENDOR_ID_ADS_TECH 0x06e1
#define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5d7a418ccdbe..73979643315b 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -27,6 +27,8 @@
static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
{ HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 3b81468a1df2..0bf70664c35e 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -174,6 +174,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
+ kfree(send_buf);
hid_err(hdev, "Unexpected non-int endpoint\n");
return;
}
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index d8008933c052..321c43fb06ae 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "System Control";
break;
}
- }
-
- if (suffix)
+ } else {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
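
The uclogic change above folds the rename into the else branch and adds the missing devm_kasprintf() failure check, instead of leaving a NULL input name behind. The idiom, reduced to a sketch:

    #include <linux/device.h>

    /* devm_kasprintf() returns NULL on allocation failure; propagate
     * -ENOMEM rather than installing a NULL name.
     */
    static int set_input_name(struct device *dev, const char **name,
                              const char *base, const char *suffix)
    {
            *name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", base, suffix);
            return *name ? 0 : -ENOMEM;
    }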
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index c439ed2f16db..af6bc76dbf64 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
- kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+ kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
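
The usbkbd one-liner above fixes the boot-protocol LED bitmap: Kana belongs in bit 4, but was shifted by 3 and ORed over Compose, so the two LEDs could never be reported independently. The HID boot keyboard LED output report is a single byte with bit 0 = Num Lock, bit 1 = Caps Lock, bit 2 = Scroll Lock, bit 3 = Compose, bit 4 = Kana; a sketch of the corrected packing:

    #include <linux/types.h>

    /* Pack the five boot-protocol LED states into one report byte. */
    static unsigned char led_report(bool num, bool caps, bool scroll,
                                    bool compose, bool kana)
    {
            return (kana << 4) | (compose << 3) | (scroll << 2) |
                   (caps << 1) | num;
    }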
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fb8cd8469328..35f26fa1ffe7 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
EXPORT_SYMBOL(vmbus_sendpacket);
/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type. This interface allows you
- * to control notifying the host. This will be useful for sending
- * batched data. Also the sender can control the send flags
- * explicitly.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
-{
- int i;
- struct vmbus_channel_packet_page_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct kvec bufferlist[3];
- u64 aligned_data = 0;
-
- if (pagecount > MAX_PAGE_BUFFER_COUNT)
- return -EINVAL;
-
- /*
- * Adjust the size down since vmbus_channel_packet_page_buffer is the
- * largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
- ((MAX_PAGE_BUFFER_COUNT - pagecount) *
- sizeof(struct hv_page_buffer));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
- desc.reserved = 0;
- desc.rangecount = pagecount;
-
- for (i = 0; i < pagecount; i++) {
- desc.range[i].len = pagebuffers[i].len;
- desc.range[i].offset = pagebuffers[i].offset;
- desc.range[i].pfn = pagebuffers[i].pfn;
- }
-
- bufferlist[0].iov_base = &desc;
- bufferlist[0].iov_len = descsize;
- bufferlist[1].iov_base = buffer;
- bufferlist[1].iov_len = bufferlen;
- bufferlist[2].iov_base = &aligned_data;
- bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-
- return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
-
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
+ * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
* using a GPADL Direct packet type.
- * The buffer includes the vmbus descriptor.
+ * The desc argument must include space for the VMBus descriptor. The
+ * rangecount field must already be set.
*/
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
- desc->rangecount = 1;
bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;
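
Both the removed pagebuffer helper and the surviving mpb_desc path pad every VMBus packet to 8-byte granularity by appending a zero-filled third iovec, so length8 (the length in 8-byte units) is exact. A sketch of that arithmetic:

    #include <linux/kernel.h>
    #include <linux/uio.h>

    /* Build the three-element kvec for a ring write: descriptor,
     * payload, then padding up to the next 8-byte boundary.
     */
    static void fill_bufferlist(struct kvec bl[3], void *desc, u32 desc_size,
                                void *payload, u32 payload_len, u64 *pad)
    {
            u32 packetlen = desc_size + payload_len;
            u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));

            *pad = 0;
            bl[0].iov_base = desc;
            bl[0].iov_len = desc_size;
            bl[1].iov_base = payload;
            bl[1].iov_len = payload_len;
            bl[2].iov_base = pad;
            bl[2].iov_len = packetlen_aligned - packetlen;
    }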
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 29780f3a7478..0b450e53161e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -477,4 +477,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev)
#endif /* CONFIG_HYPERV_TESTING */
+/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma));
+int hv_remove_ring_sysfs(struct vmbus_channel *channel);
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 6e55a1a2613d..9a72101c6be9 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1792,6 +1792,27 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
+static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);
+
+ /*
+ * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
+ * is not NULL.
+ */
+ return channel->mmap_ring_buffer(channel, vma);
+}
+
+static struct bin_attribute chan_attr_ring_buffer = {
+ .attr = {
+ .name = "ring",
+ .mode = 0600,
+ },
+ .size = 2 * SZ_2M,
+ .mmap = hv_mmap_ring_buffer_wrapper,
+};
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
@@ -1811,6 +1832,11 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
+static struct bin_attribute *vmbus_chan_bin_attrs[] = {
+ &chan_attr_ring_buffer,
+ NULL
+};
+
/*
* Channel-level attribute_group callback function. Returns the permission for
* each attribute, and returns 0 if an attribute is not visible.
@@ -1831,9 +1857,24 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
+static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
+ const struct bin_attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide ring attribute if channel's ring_sysfs_visible is set to false */
+ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
+ return 0;
+
+ return attr->attr.mode;
+}
+
static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
- .is_visible = vmbus_chan_attr_is_visible
+ .bin_attrs = vmbus_chan_bin_attrs,
+ .is_visible = vmbus_chan_attr_is_visible,
+ .is_bin_visible = vmbus_chan_bin_attr_is_visible,
};
static const struct kobj_type vmbus_chan_ktype = {
@@ -1841,6 +1882,63 @@ static const struct kobj_type vmbus_chan_ktype = {
.release = vmbus_chan_release,
};
+/**
+ * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ * @hv_mmap_ring_buffer: callback to be invoked on mmap of the channel's "ring" sysfs node,
+ * which maps the ring buffer of that channel. It has the following type:
+ * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ * struct vm_area_struct *vma)
+ * taking a pointer to the channel and a pointer to the vm_area_struct
+ * used for the mmap.
+ *
+ * The sysfs node for a channel's ring buffer is created along with the other attributes, but
+ * its visibility is disabled by default; it should only be enabled while the use-case that
+ * needs it is running.
+ * For example, an HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given
+ * point in time, and the "ring" sysfs node is needed only while uio_hv_generic is bound to
+ * that device. To avoid exposing the ring buffer by default, this function is responsible
+ * for enabling its visibility for userspace to use.
+ * Note: this interface can race with userspace, and adding new use-cases for it is
+ * discouraged. It was added to maintain backward compatibility while fixing one of the
+ * race conditions in uio_hv_generic sysfs creation.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_create_ring_sysfs(struct vmbus_channel *channel,
+ int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
+ struct vm_area_struct *vma))
+{
+ struct kobject *kobj = &channel->kobj;
+
+ channel->mmap_ring_buffer = hv_mmap_ring_buffer;
+ channel->ring_sysfs_visible = true;
+
+ return sysfs_update_group(kobj, &vmbus_chan_group);
+}
+EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);
+
+/**
+ * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
+ * @channel: Pointer to vmbus_channel structure
+ *
+ * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int hv_remove_ring_sysfs(struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ int ret;
+
+ channel->ring_sysfs_visible = false;
+ ret = sysfs_update_group(kobj, &vmbus_chan_group);
+ channel->mmap_ring_buffer = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);
+
/*
* vmbus_add_channel_kobj - setup a sub-directory under device/channels
*/
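
hv_create_ring_sysfs()/hv_remove_ring_sysfs() above do not add or delete the attribute; they flip a flag and call sysfs_update_group(), which re-runs the group's .is_visible/.is_bin_visible callbacks. A sketch of that toggle under an assumed file-scope flag:

    #include <linux/sysfs.h>

    /* 'visible' is consulted by the group's .is_bin_visible callback;
     * sysfs_update_group() re-evaluates visibility for every attribute.
     */
    static bool visible;

    static int set_attr_visible(struct kobject *kobj,
                                const struct attribute_group *grp, bool on)
    {
            visible = on;
            return sysfs_update_group(kobj, grp);
    }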
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 44afb07409a4..f05986e4f379 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -437,9 +437,13 @@ static ssize_t show_val(struct device *dev,
ret = update_cap(resource);
if (ret)
return ret;
+ resource->power_alarm = resource->power > resource->cap;
+ val = resource->power_alarm;
+ } else {
+ val = resource->power_alarm ||
+ resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
}
- val = resource->power_alarm || resource->power > resource->cap;
- resource->power_alarm = resource->power > resource->cap;
break;
case 7:
case 8:
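
The acpi_power_meter change above splits the alarm read: when the cap was just refreshed, the alarm is recomputed from scratch; otherwise a previously latched alarm is still reported once before being re-latched from the current comparison. A sketch of the latched branch:

    #include <linux/types.h>

    /* Report a latched alarm even if the condition has since cleared,
     * then re-latch from the current reading.
     */
    static bool read_power_alarm(bool *latched, long power, long cap)
    {
            bool val = *latched || power > cap;

            *latched = power > cap;
            return val;
    }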
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index cd00adaad1b4..79e5606e6d2f 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -73,7 +73,7 @@
#define DELL_SMM_LEGACY_EXECUTE 0x1
#define DELL_SMM_NO_TEMP 10
-#define DELL_SMM_NO_FANS 3
+#define DELL_SMM_NO_FANS 4
struct smm_regs {
unsigned int eax;
@@ -1074,11 +1074,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT
),
NULL
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index d92c536be9af..b779240328d5 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= fan_data->num_speed)
return -EINVAL;
+ mutex_lock(&fan_data->lock);
+
set_fan_speed(fan_data, state);
+
+ mutex_unlock(&fan_data->lock);
+
return 0;
}
@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data)
{
+ struct gpio_fan_data *fan_data = data;
+
+ mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
+ mutex_unlock(&fan_data->lock);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, 0);
+ mutex_unlock(&fan_data->lock);
}
return 0;
@@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->gpios)
+ if (fan_data->gpios) {
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, fan_data->resume_speed);
+ mutex_unlock(&fan_data->lock);
+ }
return 0;
}
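
The gpio-fan hunks above put every set_fan_speed() caller — the cooling-device callback, suspend/resume, and the devm stop action — under fan_data->lock, since they can race over the shared speed index. The shape of the fix, reduced to a sketch:

    #include <linux/mutex.h>

    struct fan_state {
            struct mutex lock;
            int speed_index;        /* shared between thermal core and PM */
    };

    static void fan_set_speed_locked(struct fan_state *f, int speed)
    {
            mutex_lock(&f->lock);
            f->speed_index = speed; /* stands in for set_fan_speed() */
            mutex_unlock(&f->lock);
    }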
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 7087197383c9..2cdbd5f107a2 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -105,7 +105,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr;
void *pcc_comm_addr;
- u64 usecs_lat;
+ unsigned int usecs_lat;
};
/*
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 4936dc2f7a56..4fe837b02e31 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1249,7 +1249,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
- spin_lock_init(&csdev->perf_sink_id_map.lock);
+ raw_spin_lock_init(&csdev->perf_sink_id_map.lock);
csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
if (!csdev->perf_sink_id_map.cpu_map) {
kfree(csdev);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index aea9ac9c4bd0..7948597d483d 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -84,7 +84,7 @@ struct etb_drvdata {
struct clk *atclk;
struct coresight_device *csdev;
struct miscdevice miscdev;
- spinlock_t spinlock;
+ raw_spinlock_t spinlock;
local_t reading;
pid_t pid;
u8 *buf;
@@ -145,7 +145,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't mess with perf sessions. */
if (coresight_get_mode(csdev) == CS_MODE_PERF) {
@@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
csdev->refcnt++;
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -176,7 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* No need to continue if the component is already in use by sysFS. */
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
@@ -219,7 +219,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
}
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -352,11 +352,11 @@ static int etb_disable(struct coresight_device *csdev)
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
csdev->refcnt--;
if (csdev->refcnt) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
@@ -366,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev)
/* Dissociate from monitored process. */
drvdata->pid = -1;
coresight_set_mode(csdev, CS_MODE_DISABLED);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "ETB disabled\n");
return 0;
@@ -443,7 +443,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
if (csdev->refcnt != 1)
@@ -566,7 +566,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
__etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
out:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
@@ -587,13 +587,13 @@ static void etb_dump(struct etb_drvdata *drvdata)
{
unsigned long flags;
- spin_lock_irqsave(&drvdata->spinlock, flags);
+ raw_spin_lock_irqsave(&drvdata->spinlock, flags);
if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
__etb_enable_hw(drvdata);
}
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}
@@ -746,7 +746,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
- spin_lock_init(&drvdata->spinlock);
+ raw_spin_lock_init(&drvdata->spinlock);
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
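
The ETB conversion above (and the trace-id one that follows) swaps spinlock_t for raw_spinlock_t: on PREEMPT_RT a spinlock_t becomes a sleeping lock, which is not permitted where the sink path already runs with interrupts disabled. The usage is otherwise unchanged:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(sink_lock);

    /* raw_spinlock_t never sleeps, even on PREEMPT_RT, so it is safe
     * in IRQ-disabled and other atomic contexts.
     */
    static void sink_op(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&sink_lock, flags);
            /* ... update sink state ... */
            raw_spin_unlock_irqrestore(&sink_lock, flags);
    }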
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index 378af743be45..7ed337d54d3e 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -22,7 +22,7 @@ enum trace_id_flags {
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
.cpu_map = &id_map_default_cpu_ids,
- .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(id_map_default.lock)
};
/* #define TRACE_ID_DEBUG 1 */
@@ -131,11 +131,11 @@ static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map
unsigned long flags;
int cpu;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
for_each_possible_cpu(cpu)
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_MAP(id_map);
}
@@ -144,7 +144,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
@@ -171,7 +171,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_out_unlock:
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
@@ -188,12 +188,12 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
if (!id)
return;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
}
@@ -204,9 +204,9 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags;
int id;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -217,9 +217,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{
unsigned long flags;
- spin_lock_irqsave(&id_map->lock, flags);
+ raw_spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
- spin_unlock_irqrestore(&id_map->lock, flags);
+ raw_spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index 4b6359326ede..4f7d2b6d79e2 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -60,6 +60,7 @@ config INTEL_TH_STH
config INTEL_TH_MSU
tristate "Intel(R) Trace Hub Memory Storage Unit"
+ depends on MMU
help
Memory Storage Unit (MSU) trace output device enables
storing STP traces to system memory. It supports single
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index bf99d79a4192..7163950eb371 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
+#include <linux/pfn_t.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -976,7 +977,6 @@ static void msc_buffer_contig_free(struct msc *msc)
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
- page->mapping = NULL;
__free_page(page);
}
@@ -1158,9 +1158,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
int i;
for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
- struct page *page = msc_sg_page(sg);
-
- page->mapping = NULL;
dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
sg_virt(sg), sg_dma_address(sg));
}
@@ -1601,22 +1598,10 @@ static void msc_mmap_close(struct vm_area_struct *vma)
{
struct msc_iter *iter = vma->vm_file->private_data;
struct msc *msc = iter->msc;
- unsigned long pg;
if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
return;
- /* drop page _refcounts */
- for (pg = 0; pg < msc->nr_pages; pg++) {
- struct page *page = msc_buffer_get_page(msc, pg);
-
- if (WARN_ON_ONCE(!page))
- continue;
-
- if (page->mapping)
- page->mapping = NULL;
- }
-
/* last mapping -- drop user_count */
atomic_dec(&msc->user_count);
mutex_unlock(&msc->buf_mutex);
@@ -1626,16 +1611,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
struct msc_iter *iter = vmf->vma->vm_file->private_data;
struct msc *msc = iter->msc;
+ struct page *page;
- vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
- if (!vmf->page)
+ page = msc_buffer_get_page(msc, vmf->pgoff);
+ if (!page)
return VM_FAULT_SIGBUS;
- get_page(vmf->page);
- vmf->page->mapping = vmf->vma->vm_file->f_mapping;
- vmf->page->index = vmf->pgoff;
-
- return 0;
+ get_page(page);
+ return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
}
static const struct vm_operations_struct msc_mmap_ops = {
@@ -1676,7 +1659,7 @@ out:
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP);
vma->vm_ops = &msc_mmap_ops;
return ret;
}
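
The msu.c rework above stops writing page->mapping/index from the fault path; instead the VMA is marked VM_MIXEDMAP and the handler inserts the PFN itself, so the core MM never treats these driver pages as pagecache. A sketch of such a fault handler (lookup_page is a placeholder for msc_buffer_get_page):

    #include <linux/mm.h>
    #include <linux/pfn_t.h>

    static vm_fault_t buf_fault(struct vm_fault *vmf)
    {
            struct page *page = lookup_page(vmf->pgoff);    /* hypothetical */

            if (!page)
                    return VM_FAULT_SIGBUS;

            get_page(page);
            /* requires VM_MIXEDMAP on the VMA at mmap time */
            return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page));
    }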
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 93ebec162c6d..61bc714c28d7 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -69,7 +69,7 @@ static void amd_asf_process_target(struct work_struct *work)
/* Check if no error bits are set in target status register */
if (reg & ASF_ERROR_STATUS) {
/* Set bank as full */
- cmd = 0;
+ cmd = 1;
reg |= GENMASK(3, 2);
outb_p(reg, ASFDATABNKSEL);
} else {
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 8e0267c7cc29..f21f9877c040 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -278,9 +278,11 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
dev->slave = i2c_new_ccgx_ucsi(&dev->adapter, dev->irq, &dgpu_node);
- if (IS_ERR(dev->slave))
+ if (IS_ERR(dev->slave)) {
+ i2c_del_adapter(&dev->adapter);
return dev_err_probe(device, PTR_ERR(dev->slave),
"register UCSI failed\n");
+ }
}
pm_runtime_set_autosuspend_delay(device, 1000);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 0d4b3935e687..342d47e67586 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
return 0;
rpm_disable:
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index cb6988482673..4415a29f749b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1503,7 +1503,10 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.name);
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return dev_err_probe(&dev->dev, ret,
+ "failed to enable clock\n");
if (i2c->use_pio) {
i2c->adap.algo = &i2c_pxa_pio_algorithm;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7bbd478171e0..515a784c951c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -148,9 +148,9 @@ struct geni_i2c_clk_fld {
* source_clock = 19.2 MHz
*/
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
- {KHZ(100), 7, 10, 11, 26},
- {KHZ(400), 2, 5, 12, 24},
- {KHZ(1000), 1, 3, 9, 18},
+ {KHZ(100), 7, 10, 12, 26},
+ {KHZ(400), 2, 5, 11, 22},
+ {KHZ(1000), 1, 2, 8, 18},
{},
};
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index da20b4487c9a..3a36d682ed57 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -150,6 +151,8 @@
/* TAG length for DATA READ in RX FIFO */
#define READ_RX_TAGS_LEN 2
+#define QUP_BUS_WIDTH 8
+
static unsigned int scl_freq;
module_param_named(scl_freq, scl_freq, uint, 0444);
MODULE_PARM_DESC(scl_freq, "SCL frequency override");
@@ -227,6 +230,7 @@ struct qup_i2c_dev {
int irq;
struct clk *clk;
struct clk *pclk;
+ struct icc_path *icc_path;
struct i2c_adapter adap;
int clk_ctl;
@@ -255,6 +259,10 @@ struct qup_i2c_dev {
/* To configure when bus is in run state */
u32 config_run;
+ /* bandwidth votes */
+ u32 src_clk_freq;
+ u32 cur_bw_clk_freq;
+
/* dma parameters */
bool is_dma;
/* To check if the current transfer is using DMA */
@@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
return ret;
}
+static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq)
+{
+ u32 needed_peak_bw;
+ int ret;
+
+ if (qup->cur_bw_clk_freq == clk_freq)
+ return 0;
+
+ needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH);
+ ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw);
+ if (ret)
+ return ret;
+
+ qup->cur_bw_clk_freq = clk_freq;
+ return 0;
+}
+
static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup)
{
struct qup_i2c_block *blk = &qup->blk;
@@ -838,6 +863,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret = 0;
int idx = 0;
+ ret = qup_i2c_vote_bw(qup, qup->src_clk_freq);
+ if (ret)
+ return ret;
+
enable_irq(qup->irq);
ret = qup_i2c_req_dma(qup);
@@ -1643,6 +1672,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
config = readl(qup->base + QUP_CONFIG);
config |= QUP_CLOCK_AUTO_GATE;
writel(config, qup->base + QUP_CONFIG);
+ qup_i2c_vote_bw(qup, 0);
clk_disable_unprepare(qup->pclk);
}
@@ -1743,6 +1773,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
goto fail_dma;
}
qup->is_dma = true;
+
+ qup->icc_path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(qup->icc_path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path),
+ "failed to get interconnect path\n");
}
nodma:
@@ -1791,6 +1826,7 @@ nodma:
qup_i2c_enable_clocks(qup);
src_clk_freq = clk_get_rate(qup->clk);
}
+ qup->src_clk_freq = src_clk_freq;
/*
* Bootloaders might leave a pending interrupt on certain QUP's,
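
The i2c-qup additions vote interconnect bandwidth proportional to the core clock — peak bandwidth is Bps_to_icc(clk_freq * QUP_BUS_WIDTH) with QUP_BUS_WIDTH of 8 — and release the vote by voting 0 when clocks are disabled, caching the last voted frequency to avoid redundant calls. The voting call, reduced to a sketch:

    #include <linux/interconnect.h>

    /* Vote peak bandwidth scaled from the bus clock; a frequency of 0
     * releases the vote.
     */
    static int vote_bus_bw(struct icc_path *path, u32 clk_freq)
    {
            return icc_set_bw(path, 0, Bps_to_icc(clk_freq * 8));
    }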
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index ed7b9d7f688c..75127b6c161f 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -158,6 +158,10 @@ struct svc_i3c_regs_save {
u32 mdynaddr;
};
+struct svc_i3c_drvdata {
+ u32 quirks;
+};
+
/**
* struct svc_i3c_master - Silvaco I3C Master structure
* @base: I3C master controller
@@ -183,6 +187,7 @@ struct svc_i3c_regs_save {
* @ibi.tbq_slot: To be queued IBI slot
* @ibi.lock: IBI lock
* @lock: Transfer lock, protect between IBI work thread and callbacks from master
+ * @drvdata: Driver data
* @enabled_events: Bit masks for enable events (IBI, HotJoin).
* @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
*/
@@ -214,6 +219,7 @@ struct svc_i3c_master {
spinlock_t lock;
} ibi;
struct mutex lock;
+ const struct svc_i3c_drvdata *drvdata;
u32 enabled_events;
u32 mctrl_config;
};
@@ -545,6 +551,8 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
queue_work(master->base.wq, &master->hj_work);
break;
case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
+ svc_i3c_master_emit_stop(master);
+ break;
default:
break;
}
@@ -892,6 +900,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
u32 reg;
int ret, i;
+ svc_i3c_master_flush_fifo(master);
+
while (true) {
/* clean SVC_I3C_MINT_IBIWON w1c bits */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
@@ -1817,6 +1827,10 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ master->drvdata = of_device_get_match_data(dev);
+ if (!master->drvdata)
+ return -EINVAL;
+
master->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(master->regs))
return PTR_ERR(master->regs);
@@ -1958,8 +1972,13 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
svc_i3c_runtime_resume, NULL)
};
+static const struct svc_i3c_drvdata npcm845_drvdata = {};
+
+static const struct svc_i3c_drvdata svc_default_drvdata = {};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
- { .compatible = "silvaco,i3c-master-v1"},
+ { .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
+ { .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
index 8601b9a8b8e7..5127e58eebc7 100644
--- a/drivers/iio/accel/adis16201.c
+++ b/drivers/iio/accel/adis16201.c
@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
IIO_CHAN_SOFT_TIMESTAMP(7)
};
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index e8cd21fa77a6..cbac622ef821 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -231,7 +231,7 @@ struct adxl355_data {
u8 transf_buf[3];
struct {
u8 buf[14];
- s64 ts;
+ aligned_s64 ts;
} buffer;
} __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index a48ac0d7bd96..2ba7d7de47e4 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -604,18 +604,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
if (ret)
return ret;
+ st->odr = odr;
+
/* Activity timers depend on ODR */
ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
if (ret)
return ret;
- ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
- if (ret)
- return ret;
-
- st->odr = odr;
-
- return 0;
+ return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
}
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 987212a7c038..a0ae30c86687 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -1229,8 +1229,11 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq)
if (ret)
return ret;
- if (device_property_read_bool(dev, "wakeup-source"))
- device_init_wakeup(dev, true);
+ if (device_property_read_bool(dev, "wakeup-source")) {
+ ret = devm_device_init_wakeup(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wakeup\n");
+ }
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index b79d135a5471..22fdc454b0ce 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -92,6 +92,8 @@
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
#define AD4695_T_SCK_CNV_DELAY_NS 80
+#define AD4695_T_CNVL_NS 80
+#define AD4695_T_CNVH_NS 10
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -364,11 +366,31 @@ static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
*/
static int ad4695_exit_conversion_mode(struct ad4695_state *st)
{
- struct spi_transfer xfer = {
- .tx_buf = &st->cnv_cmd2,
- .len = 1,
- .delay.value = AD4695_T_REGCONFIG_NS,
- .delay.unit = SPI_DELAY_UNIT_NSECS,
+ /*
+ * An extra transfer is needed to trigger a conversion here so
+ * that we can be 100% sure the command will be processed by the
+ * ADC, rather than relying on it to be in the correct state
+ * when this function is called (this chip has a quirk where the
+ * command only works when reading a conversion, and if the
+ * previous conversion was already read then it won't work). The
+ * actual conversion command is then run at the slower
+ * AD4695_REG_ACCESS_SCLK_HZ speed to guarantee this works.
+ */
+ struct spi_transfer xfers[] = {
+ {
+ .delay.value = AD4695_T_CNVL_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ .cs_change = 1,
+ .cs_change_delay.value = AD4695_T_CNVH_NS,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
};
/*
@@ -377,7 +399,7 @@ static int ad4695_exit_conversion_mode(struct ad4695_state *st)
*/
st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
- return spi_sync_transfer(st->spi, &xfer, 1);
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
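
The ad4695 exit sequence above becomes two chained SPI transfers: a payload-free transfer that only times a CS pulse (a dummy conversion), then the actual command byte at the slower register-access clock. A generic sketch of that structure, with illustrative timing values rather than the AD4695 datasheet numbers:

    #include <linux/spi/spi.h>

    static int send_cmd_after_cnv_pulse(struct spi_device *spi, u8 *cmd)
    {
            struct spi_transfer xfers[] = {
                    {
                            /* no data: just hold CS low, then pulse it */
                            .delay.value = 80,
                            .delay.unit = SPI_DELAY_UNIT_NSECS,
                            .cs_change = 1,
                            .cs_change_delay.value = 10,
                            .cs_change_delay.unit = SPI_DELAY_UNIT_NSECS,
                    },
                    {
                            /* the real command, clocked down for safety */
                            .speed_hz = 10 * 1000 * 1000,
                            .tx_buf = cmd,
                            .len = 1,
                    },
            };

            return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
    }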
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 858c8be2ff1a..44346f5a5aee 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -45,7 +45,7 @@ struct ad7266_state {
*/
struct {
__be16 sample[2];
- s64 timestamp;
+ aligned_s64 timestamp;
} data __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index d39354afd539..1c547e8d52ac 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -85,6 +85,10 @@ static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
+static const unsigned int ad7606b_oversampling_avail[9] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256,
+};
+
static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
@@ -187,6 +191,8 @@ static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev);
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev);
const struct ad7606_chip_info ad7605_4_info = {
.channels = ad7605_channels,
@@ -239,6 +245,7 @@ const struct ad7606_chip_info ad7606b_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606b_info, "IIO_AD7606");
@@ -250,6 +257,7 @@ const struct ad7606_chip_info ad7606c_16_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_16_info, "IIO_AD7606");
@@ -294,6 +302,7 @@ const struct ad7606_chip_info ad7606c_18_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_18bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_18_info, "IIO_AD7606");
@@ -307,6 +316,7 @@ const struct ad7606_chip_info ad7616_info = {
.oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
.os_req_reset = true,
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7616_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7616_info, "IIO_AD7606");
@@ -852,7 +862,12 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
}
val = (val * MICRO) + val2;
i = find_closest(val, scale_avail_uv, cs->num_scales);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
ret = st->write_scale(indio_dev, ch, i + cs->reg_offset);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
cs->range = i;
@@ -863,7 +878,12 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
i = find_closest(val, st->oversampling_avail,
st->num_os_ratios);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
ret = st->write_os(indio_dev, i);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
st->oversampling = st->oversampling_avail[i];
@@ -1138,16 +1158,123 @@ static const struct iio_trigger_ops ad7606_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
-static int ad7606_sw_mode_setup(struct iio_dev *indio_dev)
+static int ad7606_write_mask(struct ad7606_state *st, unsigned int addr,
+ unsigned long mask, unsigned int val)
+{
+ int readval;
+
+ readval = st->bops->reg_read(st, addr);
+ if (readval < 0)
+ return readval;
+
+ readval &= ~mask;
+ readval |= val;
+
+ return st->bops->reg_write(st, addr, readval);
+}
+
+static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
{
struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr, mode, ch_index;
- st->sw_mode_en = st->bops->sw_mode_config &&
- device_property_present(st->dev, "adi,sw-mode");
- if (!st->sw_mode_en)
- return 0;
+ /*
+ * Ad7616 has 16 channels divided in group A and group B.
+ * The range of channels from A are stored in registers with address 4
+ * while channels from B are stored in register with address 6.
+ * The last bit from channels determines if it is from group A or B
+ * because the order of channels in iio is 0A, 0B, 1A, 1B...
+ */
+ ch_index = ch >> 1;
+
+ ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
+
+ if ((ch & 0x1) == 0) /* channel A */
+ ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
+ else /* channel B */
+ ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
+
+ /* 0b01 for 2.5v, 0b10 for 5v and 0b11 for 10v */
+ mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
+
+ return ad7606_write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
+ mode);
+}
+
+static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_OS_MASK, val << 2);
+}
+
+static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7606_RANGE_CH_ADDR(ch),
+ AD7606_RANGE_CH_MSK(ch),
+ AD7606_RANGE_CH_MODE(ch, val));
+}
+
+static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return st->bops->reg_write(st, AD7606_OS_MODE, val);
+}
+
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * Scale can be configured individually for each channel
+ * in software mode.
+ */
+
+ st->write_scale = ad7616_write_scale_sw;
+ st->write_os = &ad7616_write_os_sw;
+
+ if (st->bops->sw_mode_config) {
+ ret = st->bops->sw_mode_config(indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Activate Burst mode and SEQEN MODE */
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+}
- indio_dev->info = &ad7606_info_sw_mode;
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ DECLARE_BITMAP(os, 3);
+
+ bitmap_fill(os, 3);
+ /*
+ * Software mode is enabled when all three oversampling
+ * pins are set high. If the oversampling GPIOs are defined
+ * in the device tree, they need to be driven high here;
+ * otherwise they must be hardwired to VDD.
+ */
+ if (st->gpio_os) {
+ gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
+ st->gpio_os->info, os);
+ }
+ /* OS of 128 and 256 are available only in software mode */
+ st->oversampling_avail = ad7606b_oversampling_avail;
+ st->num_os_ratios = ARRAY_SIZE(ad7606b_oversampling_avail);
+
+ st->write_scale = ad7606_write_scale_sw;
+ st->write_os = &ad7606_write_os_sw;
+
+ if (!st->bops->sw_mode_config)
+ return 0;
return st->bops->sw_mode_config(indio_dev);
}
@@ -1246,17 +1373,6 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return -ERESTARTSYS;
}
- st->write_scale = ad7606_write_scale_hw;
- st->write_os = ad7606_write_os_hw;
-
- ret = ad7606_sw_mode_setup(indio_dev);
- if (ret)
- return ret;
-
- ret = ad7606_chan_scales_setup(indio_dev);
- if (ret)
- return ret;
-
/* If convst pin is not defined, setup PWM. */
if (!st->gpio_convst) {
st->cnvst_pwm = devm_pwm_get(dev, NULL);
@@ -1334,6 +1450,20 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return ret;
}
+ st->write_scale = ad7606_write_scale_hw;
+ st->write_os = ad7606_write_os_hw;
+
+ st->sw_mode_en = st->chip_info->sw_setup_cb &&
+ device_property_present(st->dev, "adi,sw-mode");
+ if (st->sw_mode_en) {
+ indio_dev->info = &ad7606_info_sw_mode;
+ st->chip_info->sw_setup_cb(indio_dev);
+ }
+
+ ret = ad7606_chan_scales_setup(indio_dev);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad7606_probe, "IIO_AD7606");
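
ad7606_write_mask() above is the standard read-modify-write idiom over the bus ops, replacing the per-bus write_mask callback that the header hunk below removes. Its shape, with hypothetical reg_read/reg_write accessors:

    /* Read-modify-write: clear the masked bits, then OR in the new
     * field value. reg_read() returns a negative errno on failure.
     */
    static int write_mask(unsigned int addr, unsigned int mask,
                          unsigned int val)
    {
            int v = reg_read(addr);         /* hypothetical accessor */

            if (v < 0)
                    return v;

            v &= ~mask;
            v |= val;
            return reg_write(addr, v);      /* hypothetical accessor */
    }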
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 8778ffe515b3..7a044b499cfe 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -10,6 +10,36 @@
#define AD760X_MAX_CHANNELS 16
+#define AD7616_CONFIGURATION_REGISTER 0x02
+#define AD7616_OS_MASK GENMASK(4, 2)
+#define AD7616_BURST_MODE BIT(6)
+#define AD7616_SEQEN_MODE BIT(5)
+#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
+#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
+/*
+ * The ranges of channels from a group are stored in 2 registers:
+ * 0, 1, 2, 3 in one register followed by 4, 5, 6, 7 in a second register.
+ * For channels from the second group (8-15) the order is the same, only with
+ * an offset of 2 in the register address.
+ */
+#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
+/* The range of the channel is stored in 2 bits */
+#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
+#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
+
+#define AD7606_CONFIGURATION_REGISTER 0x02
+#define AD7606_SINGLE_DOUT 0x00
+
+/*
+ * Ranges for AD7606B channels are stored in registers starting at address 0x3.
+ * Each register stores the range for 2 channels (4 bits per channel).
+ */
+#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_MODE(ch, mode) \
+ ((GENMASK(3, 0) & (mode)) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
+#define AD7606_OS_MODE 0x08
+
#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, bits) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -71,6 +101,7 @@ struct ad7606_state;
typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+typedef int (*ad7606_sw_setup_cb_t)(struct iio_dev *indio_dev);
/**
* struct ad7606_chip_info - chip specific information
@@ -80,6 +111,7 @@ typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
* @num_channels: number of channels
* @num_adc_channels the number of channels the ADC actually inputs.
* @scale_setup_cb: callback to setup the scales for each channel
+ * @sw_setup_cb: callback to setup the software mode if available.
* @oversampling_avail pointer to the array which stores the available
* oversampling ratios.
* @oversampling_num number of elements stored in oversampling_avail array
@@ -94,6 +126,7 @@ struct ad7606_chip_info {
unsigned int num_adc_channels;
unsigned int num_channels;
ad7606_scale_setup_cb_t scale_setup_cb;
+ ad7606_sw_setup_cb_t sw_setup_cb;
const unsigned int *oversampling_avail;
unsigned int oversampling_num;
bool os_req_reset;
@@ -206,10 +239,6 @@ struct ad7606_bus_ops {
int (*reg_write)(struct ad7606_state *st,
unsigned int addr,
unsigned int val);
- int (*write_mask)(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val);
int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask);
u16 (*rd_wr_cmd)(int addr, char isWriteOp);
};
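The range-register macros moved into this header pack a per-channel field into a shared register: 2 bits per channel and four channels per register on the AD7616, 4 bits per channel and two channels per register starting at 0x03 on the AD7606B. A standalone sketch of the bit math, with GENMASK re-defined locally and the 0b literals replaced with hex so it compiles anywhere:

#include <stdio.h>

#define GENMASK(h, l)   (((1u << ((h) - (l) + 1)) - 1) << (l))

/* AD7616: 2 range bits per channel, 4 channels per register */
#define AD7616_RANGE_CH_ADDR(ch)        ((ch) >> 2)
#define AD7616_RANGE_CH_MSK(ch)         (0x3 << (((ch) & 0x3) * 2))
#define AD7616_RANGE_CH_MODE(ch, mode)  ((mode) << (((ch) & 0x3) * 2))

/* AD7606B: 4 range bits per channel, 2 channels per register from 0x03 */
#define AD7606_RANGE_CH_ADDR(ch)        (0x03 + ((ch) >> 1))
#define AD7606_RANGE_CH_MSK(ch)         (GENMASK(3, 0) << (4 * ((ch) & 0x1)))

int main(void)
{
        /* AD7616 channel 5: second register, bits 3:2 */
        printf("ad7616 ch5: reg offset %d, mask %#x, mode(2) %#x\n",
               AD7616_RANGE_CH_ADDR(5), AD7616_RANGE_CH_MSK(5),
               AD7616_RANGE_CH_MODE(5, 0x2));
        /* AD7606B channel 5: register 0x05, upper nibble */
        printf("ad7606b ch5: reg %#x, mask %#x\n",
               AD7606_RANGE_CH_ADDR(5), AD7606_RANGE_CH_MSK(5));
        return 0;
}

For channel 5 this prints register offset 1, mask 0xc on the AD7616, and register 0x5, mask 0xf0 on the AD7606B.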
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index e2c147525706..179115e90988 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -15,36 +15,6 @@
#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
-#define AD7616_CONFIGURATION_REGISTER 0x02
-#define AD7616_OS_MASK GENMASK(4, 2)
-#define AD7616_BURST_MODE BIT(6)
-#define AD7616_SEQEN_MODE BIT(5)
-#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
-#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
-/*
- * Range of channels from a group are stored in 2 registers.
- * 0, 1, 2, 3 in a register followed by 4, 5, 6, 7 in second register.
- * For channels from second group(8-15) the order is the same, only with
- * an offset of 2 for register address.
- */
-#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
-/* The range of the channel is stored in 2 bits */
-#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
-#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
-
-#define AD7606_CONFIGURATION_REGISTER 0x02
-#define AD7606_SINGLE_DOUT 0x00
-
-/*
- * Range for AD7606B channels are stored in registers starting with address 0x3.
- * Each register stores range for 2 channels(4 bits per channel).
- */
-#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_MODE(ch, mode) \
- ((GENMASK(3, 0) & mode) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
-#define AD7606_OS_MODE 0x08
-
static const struct iio_chan_spec ad7616_sw_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(16),
AD7616_CHANNEL(0),
@@ -89,10 +59,6 @@ static const struct iio_chan_spec ad7606c_18_sw_channels[] = {
AD7606_SW_CHANNEL(7, 18),
};
-static const unsigned int ad7606B_oversampling_avail[9] = {
- 1, 2, 4, 8, 16, 32, 64, 128, 256
-};
-
static u16 ad7616_spi_rd_wr_cmd(int addr, char isWriteOp)
{
/*
@@ -165,7 +131,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
{
.tx_buf = &st->d16[0],
.len = 2,
- .cs_change = 0,
+ .cs_change = 1,
}, {
.rx_buf = &st->d16[1],
.len = 2,
@@ -194,118 +160,20 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
}
-static int ad7606_spi_write_mask(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val)
-{
- int readval;
-
- readval = st->bops->reg_read(st, addr);
- if (readval < 0)
- return readval;
-
- readval &= ~mask;
- readval |= val;
-
- return st->bops->reg_write(st, addr, readval);
-}
-
-static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int ch_addr, mode, ch_index;
-
-
- /*
- * Ad7616 has 16 channels divided in group A and group B.
- * The range of channels from A are stored in registers with address 4
- * while channels from B are stored in register with address 6.
- * The last bit from channels determines if it is from group A or B
- * because the order of channels in iio is 0A, 0B, 1A, 1B...
- */
- ch_index = ch >> 1;
-
- ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
-
- if ((ch & 0x1) == 0) /* channel A */
- ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
- else /* channel B */
- ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
-
- /* 0b01 for 2.5v, 0b10 for 5v and 0b11 for 10v */
- mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
- return st->bops->write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
- mode);
-}
-
-static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return st->bops->write_mask(st, AD7616_CONFIGURATION_REGISTER,
- AD7616_OS_MASK, val << 2);
-}
-
-static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_write_mask(st,
- AD7606_RANGE_CH_ADDR(ch),
- AD7606_RANGE_CH_MSK(ch),
- AD7606_RANGE_CH_MODE(ch, val));
-}
-
-static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_reg_write(st, AD7606_OS_MODE, val);
-}
-
static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
{
- struct ad7606_state *st = iio_priv(indio_dev);
-
/*
* Scale can be configured individually for each channel
* in software mode.
*/
indio_dev->channels = ad7616_sw_channels;
- st->write_scale = ad7616_write_scale_sw;
- st->write_os = &ad7616_write_os_sw;
-
- /* Activate Burst mode and SEQEN MODE */
- return st->bops->write_mask(st,
- AD7616_CONFIGURATION_REGISTER,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+ return 0;
}
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- DECLARE_BITMAP(os, 3);
-
- bitmap_fill(os, 3);
- /*
- * Software mode is enabled when all three oversampling
- * pins are set to high. If oversampling gpios are defined
- * in the device tree, then they need to be set to high,
- * otherwise, they must be hardwired to VDD
- */
- if (st->gpio_os) {
- gpiod_set_array_value(st->gpio_os->ndescs,
- st->gpio_os->desc, st->gpio_os->info, os);
- }
- /* OS of 128 and 256 are available only in software mode */
- st->oversampling_avail = ad7606B_oversampling_avail;
- st->num_os_ratios = ARRAY_SIZE(ad7606B_oversampling_avail);
-
- st->write_scale = ad7606_write_scale_sw;
- st->write_os = &ad7606_write_os_sw;
/* Configure device spi to output on a single channel */
st->bops->reg_write(st,
@@ -350,7 +218,6 @@ static const struct ad7606_bus_ops ad7616_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7616_spi_rd_wr_cmd,
.sw_mode_config = ad7616_sw_mode_config,
};
@@ -359,7 +226,6 @@ static const struct ad7606_bus_ops ad7606b_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606B_sw_mode_config,
};
@@ -368,7 +234,6 @@ static const struct ad7606_bus_ops ad7606c_18_spi_bops = {
.read_block = ad7606_spi_read_block18to32,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606c_18_sw_mode_config,
};
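With the per-bus .write_mask callback dropped from all three bus_ops tables, read-modify-write now goes through a single helper in the core (the ad7606_write_mask() called from ad7616_sw_mode_setup() earlier in this patch, whose body is not shown in these hunks). A standalone sketch of the assumed shape, mirroring the removed ad7606_spi_write_mask() against a fake register file:

#include <stdio.h>

static unsigned int regs[16];           /* fake register file */

static int reg_read(unsigned int addr)  { return regs[addr]; }
static int reg_write(unsigned int addr, unsigned int val)
{
        regs[addr] = val;
        return 0;
}

static int write_mask(unsigned int addr, unsigned long mask, unsigned int val)
{
        int readval = reg_read(addr);

        if (readval < 0)
                return readval;         /* propagate bus errors */

        readval &= ~mask;
        readval |= val;
        return reg_write(addr, readval);
}

int main(void)
{
        regs[2] = 0x81;
        write_mask(2, 0x60, 0x60);      /* set BURST | SEQEN, keep the rest */
        printf("reg2 = %#x\n", regs[2]);        /* 0xe1 */
        return 0;
}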
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 6f8816483f1a..a9248a85466e 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
.channel = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.shift = 8,
@@ -169,7 +169,7 @@ struct ad7768_state {
union {
struct {
__be32 chan;
- s64 timestamp;
+ aligned_s64 timestamp;
} scan;
__be32 d32;
u8 d8[2];
@@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
return ret;
ret = ad7768_scan_direct(indio_dev);
- if (ret >= 0)
- *val = ret;
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
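Fixing .sign to 's' only helps if raw reads are widened correctly too, which is what the added sign_extend32() call does: the top bit of the 24-bit word becomes the sign of the returned int. A standalone demonstration, with sign_extend32() re-implemented locally to match the kernel helper's semantics:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
        uint8_t shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        /* 24-bit two's-complement words, index = realbits - 1 = 23 */
        printf("%d\n", sign_extend32(0x800000, 23));    /* -8388608 */
        printf("%d\n", sign_extend32(0x7fffff, 23));    /*  8388607 */
        return 0;
}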
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index 0ec9cda10f5f..abfababcea10 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -98,6 +98,9 @@ struct ad7944_chip_info {
const struct iio_chan_spec channels[2];
};
+/* get number of bytes for SPI xfer */
+#define AD7944_SPI_BYTES(scan_type) ((scan_type).realbits > 16 ? 4 : 2)
+
/*
* AD7944_DEFINE_CHIP_INFO - Define a chip info structure for a specific chip
* @_name: The name of the chip
@@ -164,7 +167,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
/* Then we can read the data during the acquisition phase */
xfers[2].rx_buf = &adc->sample.raw;
- xfers[2].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[2].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[2].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 3);
@@ -193,7 +196,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &adc->sample.raw;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -228,7 +231,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = adc->chain_mode_buf;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits) * n_chain_dev;
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type) * n_chain_dev;
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -274,12 +277,12 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
return ret;
if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = ((u32 *)adc->chain_mode_buf)[chan->scan_index];
else
*val = ((u16 *)adc->chain_mode_buf)[chan->scan_index];
} else {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = adc->sample.raw.u32;
else
*val = adc->sample.raw.u16;
@@ -409,8 +412,7 @@ static int ad7944_chain_mode_alloc(struct device *dev,
/* 1 word for each voltage channel + aligned u64 for timestamp */
chain_mode_buf_size = ALIGN(n_chain_dev *
- BITS_TO_BYTES(chan[0].scan_type.storagebits), sizeof(u64))
- + sizeof(u64);
+ AD7944_SPI_BYTES(chan[0].scan_type), sizeof(u64)) + sizeof(u64);
buf = devm_kzalloc(dev, chain_mode_buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
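AD7944_SPI_BYTES() keys the transfer length off realbits rather than storagebits: the wire word is what the SPI controller actually clocks, while storagebits only describes the in-memory scan layout. A standalone sketch; the part names and widths below are illustrative of the family, not taken from this diff:

#include <stdio.h>

struct scan_type { int realbits, storagebits; };

#define AD7944_SPI_BYTES(st)    ((st).realbits > 16 ? 4 : 2)

int main(void)
{
        struct scan_type part_14bit = { .realbits = 14, .storagebits = 16 };
        struct scan_type part_18bit = { .realbits = 18, .storagebits = 32 };

        printf("14-bit part: %d bytes per transfer\n",
               AD7944_SPI_BYTES(part_14bit));
        printf("18-bit part: %d bytes per transfer\n",
               AD7944_SPI_BYTES(part_18bit));
        return 0;
}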
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 221a5fdc1eaa..e41650177085 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -467,7 +467,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct {
__le16 values[DLN2_ADC_MAX_CHANNELS];
- int64_t timestamp_space;
+ aligned_s64 timestamp_space;
} data;
struct dln2_adc_get_all_vals dev_data;
struct dln2_adc *dln2 = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index 7fb8b2499a1d..b64a8a407168 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -543,7 +543,9 @@ static int iadc_probe(struct platform_device *pdev)
else
return ret;
} else {
- device_init_wakeup(iadc->dev, 1);
+ ret = devm_device_init_wakeup(iadc->dev);
+ if (ret)
+ return dev_err_probe(iadc->dev, ret, "Failed to init wakeup\n");
}
ret = iadc_update_offset(iadc);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index a29e54754c8f..ab4de67fb135 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -480,15 +480,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (info->reset)
rockchip_saradc_reset_controller(info->reset);
- /*
- * Use a default value for the converter clock.
- * This may become user-configurable in the future.
- */
- ret = clk_set_rate(info->clk, info->data->clk_rate);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "failed to set adc clk rate\n");
-
ret = regulator_enable(info->vref);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
@@ -515,6 +506,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
"failed to get adc clock\n");
+ /*
+ * Use a default value for the converter clock.
+ * This may become user-configurable in the future.
+ */
+ ret = clk_set_rate(info->clk, info->data->clk_rate);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to set adc clk rate\n");
platform_set_drvdata(pdev, indio_dev);
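The rockchip_saradc hunks move clk_set_rate() from before the clock is requested to just after devm_clk_get() succeeds. A toy illustration of the ordering rule; the names are stand-ins, not the kernel clk API:

#include <stdio.h>

struct clk { int valid; };

static struct clk *clk_get(void)
{
        static struct clk c = { .valid = 1 };

        return &c;
}

static int clk_set_rate(struct clk *clk, long rate)
{
        if (!clk || !clk->valid)
                return -22;     /* -EINVAL: clock not acquired yet */
        printf("rate set to %ld\n", rate);
        return 0;
}

int main(void)
{
        struct clk *clk = NULL;

        /* old order: clk_set_rate(clk, ...) here operated on nothing */
        clk = clk_get();
        return clk_set_rate(clk, 1000000);
}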
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index d0bd94912e0a..e05ce1f12065 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -5,7 +5,6 @@
* Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
*/
-#include <linux/unaligned.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -19,6 +18,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/serdev.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#define PMS7003_DRIVER_NAME "pms7003"
@@ -76,7 +77,7 @@ struct pms7003_state {
/* Used to construct scan to push to the IIO buffer */
struct {
u16 data[3]; /* PM1, PM2P5, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
};
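aligned_s64 matters on 32-bit ABIs where a bare s64 is only 4-byte aligned: the IIO core stores the timestamp with an 8-byte-aligned put, so the scan struct itself must guarantee that alignment. A standalone probe of the difference, with aligned_s64 approximated by a GCC attribute; on x86-64 both structs already report alignment 8, on i386 the plain one reports 4, so a stack instance may start on a 4-byte boundary and misalign ts:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t aligned_s64 __attribute__((aligned(8)));

struct scan_plain   { uint16_t data[3]; int64_t ts; };
struct scan_aligned { uint16_t data[3]; aligned_s64 ts; };

int main(void)
{
        printf("plain:   struct align %zu, ts offset %zu\n",
               alignof(struct scan_plain), offsetof(struct scan_plain, ts));
        printf("aligned: struct align %zu, ts offset %zu\n",
               alignof(struct scan_aligned), offsetof(struct scan_aligned, ts));
        return 0;
}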
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index 6f4f2ba2c09d..a7888146188d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
int ret;
struct {
s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
- s64 ts;
+ aligned_s64 ts;
} scan;
mutex_lock(&state->lock);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index ad1882f608c0..2055a03cbeb1 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -66,6 +66,10 @@ static struct {
{HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
{HID_USAGE_SENSOR_HINGE, 0, 0, 17453293},
{HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293},
+
+ {HID_USAGE_SENSOR_HUMAN_PRESENCE, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_PROXIMITY, 0, 1, 0},
+ {HID_USAGE_SENSOR_HUMAN_ATTENTION, 0, 1, 0},
};
static void simple_div(int dividend, int divisor, int *whole,
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index 8974df625670..67957fc21696 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -137,13 +137,20 @@ static int ad3552r_hs_buffer_postenable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ /* Primary region access, set streaming mode (now in SPI + SDR). */
+ ret = ad3552r_qspi_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST, 0, 1);
+ if (ret)
+ return ret;
+
/* Inform DAC chip to switch into DDR mode */
ret = ad3552r_qspi_update_reg_bits(st,
AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
AD3552R_MASK_SPI_CONFIG_DDR,
AD3552R_MASK_SPI_CONFIG_DDR, 1);
if (ret)
- return ret;
+ goto exit_err_ddr;
/* Inform DAC IP to go for DDR mode from now on */
ret = iio_backend_ddr_enable(st->back);
@@ -174,6 +181,11 @@ exit_err:
iio_backend_ddr_disable(st->back);
+exit_err_ddr:
+ ad3552r_qspi_update_reg_bits(st, AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST,
+ AD3552R_MASK_SINGLE_INST, 1);
+
return ret;
}
@@ -198,6 +210,14 @@ static int ad3552r_hs_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ /* Back to single instruction mode, disabling loop. */
+ ret = ad3552r_qspi_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST,
+ AD3552R_MASK_SINGLE_INST, 1);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -308,6 +328,13 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST |
+ AD3552R_MASK_SHORT_INSTRUCTION, 1);
+ if (ret)
+ return ret;
+
ret = ad3552r_hs_scratch_pad_test(st);
if (ret)
return ret;
diff --git a/drivers/iio/dac/ad3552r-hs.h b/drivers/iio/dac/ad3552r-hs.h
index 724261d38dea..4a9e35234124 100644
--- a/drivers/iio/dac/ad3552r-hs.h
+++ b/drivers/iio/dac/ad3552r-hs.h
@@ -8,11 +8,19 @@
struct iio_backend;
+enum ad3552r_io_mode {
+ AD3552R_IO_MODE_SPI,
+ AD3552R_IO_MODE_DSPI,
+ AD3552R_IO_MODE_QSPI,
+};
+
struct ad3552r_hs_platform_data {
int (*bus_reg_read)(struct iio_backend *back, u32 reg, u32 *val,
size_t data_size);
int (*bus_reg_write)(struct iio_backend *back, u32 reg, u32 val,
size_t data_size);
+ int (*bus_set_io_mode)(struct iio_backend *back,
+ enum ad3552r_io_mode mode);
u32 bus_sample_data_clock_hz;
};
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index ac871deb8063..bcaf365feef4 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -64,7 +64,7 @@
#define AXI_DAC_UI_STATUS_IF_BUSY BIT(4)
#define AXI_DAC_CUSTOM_CTRL_REG 0x008C
#define AXI_DAC_CUSTOM_CTRL_ADDRESS GENMASK(31, 24)
-#define AXI_DAC_CUSTOM_CTRL_SYNCED_TRANSFER BIT(2)
+#define AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE GENMASK(3, 2)
#define AXI_DAC_CUSTOM_CTRL_STREAM BIT(1)
#define AXI_DAC_CUSTOM_CTRL_TRANSFER_DATA BIT(0)
@@ -722,6 +722,25 @@ static int axi_dac_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val,
return regmap_read(st->regmap, AXI_DAC_CUSTOM_RD_REG, val);
}
+static int axi_dac_bus_set_io_mode(struct iio_backend *back,
+ enum ad3552r_io_mode mode)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ival, ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_update_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG,
+ AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE,
+ FIELD_PREP(AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE, mode));
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(st->regmap, AXI_DAC_UI_STATUS_REG, ival,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, ival) == 0, 10,
+ 100 * KILO);
+}
+
static void axi_dac_child_remove(void *data)
{
platform_device_unregister(data);
@@ -733,6 +752,7 @@ static int axi_dac_create_platform_device(struct axi_dac_state *st,
struct ad3552r_hs_platform_data pdata = {
.bus_reg_read = axi_dac_bus_reg_read,
.bus_reg_write = axi_dac_bus_reg_write,
+ .bus_set_io_mode = axi_dac_bus_set_io_mode,
.bus_sample_data_clock_hz = st->dac_clk_rate,
};
struct platform_device_info pi = {
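AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE widens the old single SYNCED_TRANSFER bit into a 2-bit field at bits 3:2, indexed by the new ad3552r_io_mode enum. A standalone sketch of the field update, with GENMASK/FIELD_PREP re-defined locally and an illustrative register value:

#include <stdio.h>

#define GENMASK(h, l)           (((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_PREP(mask, val)   (((val) << __builtin_ctz(mask)) & (mask))

#define CUSTOM_CTRL_MULTI_IO_MODE       GENMASK(3, 2)
#define CUSTOM_CTRL_TRANSFER_DATA       1u

enum ad3552r_io_mode { IO_MODE_SPI, IO_MODE_DSPI, IO_MODE_QSPI };

int main(void)
{
        unsigned int reg = CUSTOM_CTRL_TRANSFER_DATA;

        reg = (reg & ~CUSTOM_CTRL_MULTI_IO_MODE) |
              FIELD_PREP(CUSTOM_CTRL_MULTI_IO_MODE, IO_MODE_QSPI);
        printf("ctrl reg = %#x\n", reg);        /* 0x9: QSPI + transfer */
        return 0;
}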
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index 7fec52e0b486..950fcacddd40 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -654,8 +654,7 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
FIELD_PREP(BMI270_ACC_CONF_ODR_MSK,
BMI270_ACC_CONF_ODR_100HZ) |
FIELD_PREP(BMI270_ACC_CONF_BWP_MSK,
- BMI270_ACC_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_ACC_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure accelerometer");
@@ -663,8 +662,7 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
FIELD_PREP(BMI270_GYR_CONF_ODR_MSK,
BMI270_GYR_CONF_ODR_200HZ) |
FIELD_PREP(BMI270_GYR_CONF_BWP_MSK,
- BMI270_GYR_CONF_BWP_NORMAL_MODE) |
- BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
+ BMI270_GYR_CONF_BWP_NORMAL_MODE));
if (ret)
return dev_err_probe(dev, ret, "Failed to configure gyroscope");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 3d3b27f28c9d..273196e647a2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
u16 fifo_count;
u32 fifo_period;
s64 timestamp;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8);
size_t i, nb;
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 0a7cd8c1aa33..8a9d2593576a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_SAMPLE_SIZE;
+
fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
if (!fifo_len)
return 0;
+ if (!pattern_len)
+ pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
+
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw,
ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4fdcc2acc94e..96c6106b95ee 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2719,8 +2719,11 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (device_property_read_bool(dev, "wakeup-source") ||
- (pdata && pdata->wakeup_source))
- device_init_wakeup(dev, true);
+ (pdata && pdata->wakeup_source)) {
+ err = devm_device_init_wakeup(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to init wakeup\n");
+ }
return 0;
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 76b76d12b388..4c65b32d34ce 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -34,9 +34,9 @@ struct prox_state {
struct iio_chan_spec channels[MAX_CHANNELS];
u32 channel2usage[MAX_CHANNELS];
u32 human_presence[MAX_CHANNELS];
- int scale_pre_decml;
- int scale_post_decml;
- int scale_precision;
+ int scale_pre_decml[MAX_CHANNELS];
+ int scale_post_decml[MAX_CHANNELS];
+ int scale_precision[MAX_CHANNELS];
unsigned long scan_mask[2]; /* One entry plus one terminator. */
int num_channels;
};
@@ -116,13 +116,15 @@ static int prox_read_raw(struct iio_dev *indio_dev,
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
- *val = prox_state->scale_pre_decml;
- *val2 = prox_state->scale_post_decml;
- ret_type = prox_state->scale_precision;
+ if (chan->scan_index >= prox_state->num_channels)
+ return -EINVAL;
+
+ *val = prox_state->scale_pre_decml[chan->scan_index];
+ *val2 = prox_state->scale_post_decml[chan->scan_index];
+ ret_type = prox_state->scale_precision[chan->scan_index];
break;
case IIO_CHAN_INFO_OFFSET:
- *val = hid_sensor_convert_exponent(
- prox_state->prox_attr[chan->scan_index].unit_expo);
+ *val = 0;
ret_type = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -249,6 +251,10 @@ static int prox_parse_report(struct platform_device *pdev,
st->prox_attr[index].size);
dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr[index].index,
st->prox_attr[index].report_id);
+ st->scale_precision[index] =
+ hid_sensor_format_scale(usage_id, &st->prox_attr[index],
+ &st->scale_pre_decml[index],
+ &st->scale_post_decml[index]);
index++;
}
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 65b295877b41..393a3d2fbe1d 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -788,8 +788,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
int ret;
bool wake_result_ready_queue = false;
enum iio_chan_type chan_type = opt->chip_info->chan_type;
+ bool ok_to_ignore_lock = opt->ok_to_ignore_lock;
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_lock(&opt->lock);
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
@@ -826,7 +827,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
out:
- if (!opt->ok_to_ignore_lock)
+ if (!ok_to_ignore_lock)
mutex_unlock(&opt->lock);
if (wake_result_ready_queue)
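The opt3001 change snapshots ok_to_ignore_lock once so the lock and unlock decisions in the IRQ handler can never disagree if the flag flips mid-handler. A toy model of the pairing hazard; this is a single-threaded stand-in where the flip below simulates the racing writer:

#include <stdbool.h>
#include <stdio.h>

static bool ok_to_ignore_lock;
static int lock_depth;

static void irq_handler(void)
{
        bool snapshot = ok_to_ignore_lock;      /* read the flag once */

        if (!snapshot)
                lock_depth++;                   /* mutex_lock() */
        /* ... work; a racing writer may flip the flag here ... */
        ok_to_ignore_lock = !ok_to_ignore_lock; /* simulate the race */
        if (!snapshot)
                lock_depth--;                   /* mutex_unlock(), always paired */
}

int main(void)
{
        irq_handler();
        printf("lock depth after handler: %d\n", lock_depth);  /* 0 */
        return 0;
}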
diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
index 9d5c30afa9d6..d62a018eaff3 100644
--- a/drivers/iio/pressure/mprls0025pa.h
+++ b/drivers/iio/pressure/mprls0025pa.h
@@ -34,16 +34,6 @@ struct iio_dev;
struct mpr_data;
struct mpr_ops;
-/**
- * struct mpr_chan
- * @pres: pressure value
- * @ts: timestamp
- */
-struct mpr_chan {
- s32 pres;
- s64 ts;
-};
-
enum mpr_func_id {
MPR_FUNCTION_A,
MPR_FUNCTION_B,
@@ -69,6 +59,8 @@ enum mpr_func_id {
* reading in a loop until data is ready
* @completion: handshake from irq to read
* @chan: channel values for buffered mode
+ * @chan.pres: pressure value
+ * @chan.ts: timestamp
* @buffer: raw conversion data
*/
struct mpr_data {
@@ -87,7 +79,10 @@ struct mpr_data {
struct gpio_desc *gpiod_reset;
int irq;
struct completion completion;
- struct mpr_chan chan;
+ struct {
+ s32 pres;
+ aligned_s64 ts;
+ } chan;
u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index c28a7a6dea5f..555a61e2f3fd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
+ char tc_type;
u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
- char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
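Moving tc_type ahead of the IIO_DMA_MINALIGN buffer keeps the DMA-safe buffer as the last member: anything placed after it would share the buffer's final cacheline and could be corrupted around non-coherent DMA. A standalone layout check, with IIO_DMA_MINALIGN assumed to be 64 here (in the kernel it derives from ARCH_DMA_MINALIGN):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define IIO_DMA_MINALIGN 64     /* stand-in value */

struct bad {
        alignas(IIO_DMA_MINALIGN) unsigned char buffer[16];
        char tc_type;           /* lands inside the buffer's cacheline */
};

struct good {
        char tc_type;
        alignas(IIO_DMA_MINALIGN) unsigned char buffer[16];    /* last member */
};

int main(void)
{
        printf("bad:  tc_type at offset %zu (within buffer's 64-byte line)\n",
               offsetof(struct bad, tc_type));
        printf("good: buffer at offset %zu, nothing after it\n",
               offsetof(struct good, buffer));
        return 0;
}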
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index ee75b99f84bc..baf12e3e2a7e 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1352,6 +1352,9 @@ static void ib_device_notify_register(struct ib_device *device)
down_read(&devices_rwsem);
+ /* Mark for userspace that device is ready */
+ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
if (ret)
goto out;
@@ -1468,10 +1471,9 @@ int ib_register_device(struct ib_device *device, const char *name,
return ret;
}
dev_set_uevent_suppress(&device->dev, false);
- /* Mark for userspace that device is ready */
- kobject_uevent(&device->dev.kobj, KOBJ_ADD);
ib_device_notify_register(device);
+
ib_device_put(device);
return 0;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 07c571c7b699..c5b686394760 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
unsigned long pgsz_bitmap,
unsigned long virt)
{
- struct scatterlist *sg;
+ unsigned long curr_len = 0;
+ dma_addr_t curr_base = ~0;
unsigned long va, pgoff;
+ struct scatterlist *sg;
dma_addr_t mask;
+ dma_addr_t end;
int i;
umem->iova = va = virt;
@@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
pgoff = umem->address & ~PAGE_MASK;
for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
- /* Walk SGL and reduce max page size if VA/PA bits differ
- * for any address.
+ /* If the current entry is physically contiguous with the previous
+ * one, there is no need to take its start address into consideration.
*/
- mask |= (sg_dma_address(sg) + pgoff) ^ va;
+ if (check_add_overflow(curr_base, curr_len, &end) ||
+ end != sg_dma_address(sg)) {
+
+ curr_base = sg_dma_address(sg);
+ curr_len = 0;
+
+ /* Reduce max page size if VA/PA bits differ */
+ mask |= (curr_base + pgoff) ^ va;
+
+ /* The alignment of any VA matching a discontinuity point
+ * in the physical memory sets the maximum possible page
+ * size, since such a VA must be the aligned starting point
+ * of a new page.
+ */
+ if (i != 0)
+ mask |= va;
+ }
+
+ curr_len += sg_dma_len(sg);
va += sg_dma_len(sg) - pgoff;
- /* Except for the last entry, the ending iova alignment sets
- * the maximum possible page size as the low bits of the iova
- * must be zero when starting the next chunk.
- */
- if (i != (umem->sgt_append.sgt.nents - 1))
- mask |= va;
+
pgoff = 0;
}
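The reworked walk only feeds two things into the page-size mask: the VA/PA bit skew at the start of each physically discontiguous chunk, and the VA of every discontinuity point, which must begin a new page. A simplified standalone model; pgoff handling and the check_add_overflow() guard are elided, and the return value is the largest power-of-two page size the mask still permits (0 meaning unconstrained):

#include <stdint.h>
#include <stdio.h>

struct sge { uint64_t dma_addr, len; };

static uint64_t pgsz_bound(const struct sge *sg, int n, uint64_t va)
{
        uint64_t mask = 0, curr_base = ~0ULL, curr_len = 0;

        for (int i = 0; i < n; i++) {
                if (curr_base + curr_len != sg[i].dma_addr) {
                        curr_base = sg[i].dma_addr;
                        curr_len = 0;
                        mask |= curr_base ^ va;         /* VA/PA bit skew */
                        if (i)
                                mask |= va;     /* chunk must start a page */
                }
                curr_len += sg[i].len;
                va += sg[i].len;
        }
        return mask ? (mask & -mask) : 0;       /* 0: unconstrained */
}

int main(void)
{
        /* two 2 MiB chunks with a 2 MiB hole between them */
        struct sge sg[] = {
                { 0x40000000, 0x200000 },
                { 0x40400000, 0x200000 },
        };

        printf("page-size bound: %#llx\n",      /* 0x200000: 2 MiB pages fit */
               (unsigned long long)pgsz_bound(sg, 2, 0x40000000));
        return 0;
}

Under the old per-entry rule, a large region fragmented into many physically contiguous SGEs was capped at the SGE granularity; the contiguity check lifts that cap.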
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ad14c39d48c..de75dcc0947c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -716,8 +716,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
goto err_free;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_free;
}
@@ -807,8 +807,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
if (cmd.flags & IB_MR_REREG_PD) {
new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle,
attrs);
- if (!new_pd) {
- ret = -EINVAL;
+ if (IS_ERR(new_pd)) {
+ ret = PTR_ERR(new_pd);
goto put_uobjs;
}
} else {
@@ -917,8 +917,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
return PTR_ERR(uobj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_free;
}
@@ -1125,8 +1125,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
if (ret)
@@ -1187,8 +1187,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
header_ptr = attrs->ucore.outbuf;
@@ -1236,8 +1236,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs)
return ret;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
@@ -1319,8 +1319,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ind_tbl = uobj_get_obj_read(rwq_ind_table,
UVERBS_OBJECT_RWQ_IND_TBL,
cmd->rwq_ind_tbl_handle, attrs);
- if (!ind_tbl) {
- ret = -EINVAL;
+ if (IS_ERR(ind_tbl)) {
+ ret = PTR_ERR(ind_tbl);
goto err_put;
}
@@ -1358,8 +1358,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (cmd->is_srq) {
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ,
cmd->srq_handle, attrs);
- if (!srq || srq->srq_type == IB_SRQT_XRC) {
- ret = -EINVAL;
+ if (IS_ERR(srq) ||
+ srq->srq_type == IB_SRQT_XRC) {
+ ret = IS_ERR(srq) ? PTR_ERR(srq) :
+ -EINVAL;
goto err_put;
}
}
@@ -1369,23 +1371,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rcq = uobj_get_obj_read(
cq, UVERBS_OBJECT_CQ,
cmd->recv_cq_handle, attrs);
- if (!rcq) {
- ret = -EINVAL;
+ if (IS_ERR(rcq)) {
+ ret = PTR_ERR(rcq);
goto err_put;
}
}
}
}
- if (has_sq)
+ if (has_sq) {
scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->send_cq_handle, attrs);
+ if (IS_ERR(scq)) {
+ ret = PTR_ERR(scq);
+ goto err_put;
+ }
+ }
+
if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI)
rcq = rcq ?: scq;
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle,
attrs);
- if (!pd || (!scq && has_sq)) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put;
}
@@ -1480,18 +1488,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
err_put:
if (!IS_ERR(xrcd_uobj))
uobj_put_read(xrcd_uobj);
- if (pd)
+ if (!IS_ERR_OR_NULL(pd))
uobj_put_obj_read(pd);
- if (scq)
+ if (!IS_ERR_OR_NULL(scq))
rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (rcq && rcq != scq)
+ if (!IS_ERR_OR_NULL(rcq) && rcq != scq)
rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (srq)
+ if (!IS_ERR_OR_NULL(srq))
rdma_lookup_put_uobject(&srq->uobject->uevent.uobject,
UVERBS_LOOKUP_READ);
- if (ind_tbl)
+ if (!IS_ERR_OR_NULL(ind_tbl))
uobj_put_obj_read(ind_tbl);
uobj_alloc_abort(&obj->uevent.uobject, attrs);
@@ -1653,8 +1661,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -1759,8 +1767,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs,
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle,
attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2026,8 +2034,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
return -ENOMEM;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2064,9 +2072,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH,
user_wr->wr.ud.ah, attrs);
- if (!ud->ah) {
+ if (IS_ERR(ud->ah)) {
+ ret = PTR_ERR(ud->ah);
kfree(ud);
- ret = -EINVAL;
goto out_put;
}
ud->remote_qpn = user_wr->wr.ud.remote_qpn;
@@ -2303,8 +2311,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- ret = -EINVAL;
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
goto out;
}
@@ -2354,8 +2362,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs)
return PTR_ERR(wr);
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq) {
- ret = -EINVAL;
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
goto out;
}
@@ -2411,8 +2419,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err;
}
@@ -2481,8 +2489,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
- return -EINVAL;
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
obj = qp->uobject;
@@ -2531,8 +2539,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs)
return ret;
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp)
- return -EINVAL;
+ if (IS_ERR(qp))
+ return PTR_ERR(qp);
obj = qp->uobject;
mutex_lock(&obj->mcast_lock);
@@ -2666,8 +2674,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_FLOW_ACTION,
kern_spec->action.handle,
attrs);
- if (!ib_spec->action.act)
- return -EINVAL;
+ if (IS_ERR(ib_spec->action.act))
+ return PTR_ERR(ib_spec->action.act);
ib_spec->action.size =
sizeof(struct ib_flow_spec_action_handle);
flow_resources_add(uflow_res,
@@ -2684,8 +2692,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
UVERBS_OBJECT_COUNTERS,
kern_spec->flow_count.handle,
attrs);
- if (!ib_spec->flow_count.counters)
- return -EINVAL;
+ if (IS_ERR(ib_spec->flow_count.counters))
+ return PTR_ERR(ib_spec->flow_count.counters);
ib_spec->flow_count.size =
sizeof(struct ib_flow_spec_action_count);
flow_resources_add(uflow_res,
@@ -2903,14 +2911,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return PTR_ERR(obj);
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs);
- if (!pd) {
- err = -EINVAL;
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
goto err_uobj;
}
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq) {
- err = -EINVAL;
+ if (IS_ERR(cq)) {
+ err = PTR_ERR(cq);
goto err_put_pd;
}
@@ -3011,8 +3019,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs)
return -EINVAL;
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
- if (!wq)
- return -EINVAL;
+ if (IS_ERR(wq))
+ return PTR_ERR(wq);
if (cmd.attr_mask & IB_WQ_FLAGS) {
wq_attr.flags = cmd.flags;
@@ -3095,8 +3103,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
num_read_wqs++) {
wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
wqs_handles[num_read_wqs], attrs);
- if (!wq) {
- err = -EINVAL;
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
goto put_wqs;
}
@@ -3251,8 +3259,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
}
qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
- if (!qp) {
- err = -EINVAL;
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err_uobj;
}
@@ -3398,15 +3406,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
if (ib_srq_has_cq(cmd->srq_type)) {
attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cmd->cq_handle, attrs);
- if (!attr.ext.cq) {
- ret = -EINVAL;
+ if (IS_ERR(attr.ext.cq)) {
+ ret = PTR_ERR(attr.ext.cq);
goto err_put_xrcd;
}
}
pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs);
- if (!pd) {
- ret = -EINVAL;
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
goto err_put_cq;
}
@@ -3513,8 +3521,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq)
- return -EINVAL;
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
@@ -3541,8 +3549,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs)
return ret;
srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs);
- if (!srq)
- return -EINVAL;
+ if (IS_ERR(srq))
+ return PTR_ERR(srq);
ret = ib_query_srq(srq, &attr);
@@ -3667,8 +3675,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
return -EOPNOTSUPP;
cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
- if (!cq)
- return -EINVAL;
+ if (IS_ERR(cq))
+ return PTR_ERR(cq);
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 473ee0831307..dc40001072a5 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -3109,22 +3109,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
unsigned int block_offset;
- unsigned int sg_delta;
+ unsigned int delta;
if (!biter->__sg_nents || !biter->__sg)
return false;
biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
- sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
+ delta = BIT_ULL(biter->__pg_bit) - block_offset;
- if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
- biter->__sg_advance += sg_delta;
- } else {
+ while (biter->__sg_nents && biter->__sg &&
+ sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) {
+ delta -= sg_dma_len(biter->__sg) - biter->__sg_advance;
biter->__sg_advance = 0;
biter->__sg = sg_next(biter->__sg);
biter->__sg_nents--;
}
+ biter->__sg_advance += delta;
return true;
}
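The loop turns the single-step advance into one that can retire several SGEs per block: delta is whatever remains of the current block, and every SGE that ends inside it is consumed before the advance is recorded. A standalone miniature where two contiguous 2 KiB SGEs fold into one 4 KiB block:

#include <stdint.h>
#include <stdio.h>

struct sge { uint64_t addr; uint32_t len; };

int main(void)
{
        struct sge sg[] = { { 0x1000, 0x800 }, { 0x1800, 0x800 } };
        const uint64_t block = 0x1000;          /* 4 KiB block size */
        unsigned int i = 0, advance = 0;

        while (i < 2) {
                uint64_t dma = sg[i].addr + advance;
                uint64_t delta = block - (dma & (block - 1));

                printf("block at %#llx\n", (unsigned long long)dma);
                /* retire every SGE that ends inside this block */
                while (i < 2 && sg[i].len - advance <= delta) {
                        delta -= sg[i].len - advance;
                        advance = 0;
                        i++;
                }
                advance += delta;
        }
        return 0;
}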
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b27791029fa9..b9f4a2937c3a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
+ dput(dentry);
error = -EPERM;
goto bail;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index fec87c9030ab..fffd144d509e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -56,11 +56,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
- if (err) {
- vfree(cq->queue->buf);
- kfree(cq->queue);
+ if (err)
return err;
- }
cq->is_user = uresp;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index c33e6f33265b..400e7aac6aaf 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -77,12 +77,13 @@
* xbox d-pads should map to buttons, as is required for DDR pads
* but we map them to axes when possible to simplify things
*/
-#define MAP_DPAD_TO_BUTTONS (1 << 0)
-#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
-#define MAP_STICKS_TO_NULL (1 << 2)
-#define MAP_SELECT_BUTTON (1 << 3)
-#define MAP_PADDLES (1 << 4)
-#define MAP_PROFILE_BUTTON (1 << 5)
+#define MAP_DPAD_TO_BUTTONS BIT(0)
+#define MAP_TRIGGERS_TO_BUTTONS BIT(1)
+#define MAP_STICKS_TO_NULL BIT(2)
+#define MAP_SHARE_BUTTON BIT(3)
+#define MAP_PADDLES BIT(4)
+#define MAP_PROFILE_BUTTON BIT(5)
+#define MAP_SHARE_OFFSET BIT(6)
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -135,7 +136,7 @@ static const struct xpad_device {
{ 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
- { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */
{ 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
@@ -159,7 +160,7 @@ static const struct xpad_device {
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
- { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
@@ -205,13 +206,13 @@ static const struct xpad_device {
{ 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
+ { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
- { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
+ { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
@@ -240,7 +241,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
- { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -288,6 +289,8 @@ static const struct xpad_device {
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
+ { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
+ { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -352,6 +355,7 @@ static const struct xpad_device {
{ 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
{ 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
@@ -387,10 +391,11 @@ static const struct xpad_device {
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
- { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -1027,7 +1032,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
* The report format was gleaned from
* https://github.com/kylelemons/xbox/blob/master/xbox.go
*/
-static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
+static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len)
{
struct input_dev *dev = xpad->dev;
bool do_sync = false;
@@ -1068,8 +1073,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
- if (xpad->mapping & MAP_SELECT_BUTTON)
- input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
+ if (xpad->mapping & MAP_SHARE_BUTTON) {
+ if (xpad->mapping & MAP_SHARE_OFFSET)
+ input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0));
+ else
+ input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0));
+ }
/* buttons A,B,X,Y */
input_report_key(dev, BTN_A, data[4] & BIT(4));
@@ -1217,7 +1226,7 @@ static void xpad_irq_in(struct urb *urb)
xpad360w_process_packet(xpad, 0, xpad->idata);
break;
case XTYPE_XBOXONE:
- xpadone_process_packet(xpad, 0, xpad->idata);
+ xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length);
break;
default:
xpad_process_packet(xpad, 0, xpad->idata);
@@ -1944,7 +1953,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
- if (xpad->mapping & MAP_SELECT_BUTTON)
+ if (xpad->mapping & MAP_SHARE_BUTTON)
input_set_capability(input_dev, EV_KEY, KEY_RECORD);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)
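Indexing the share-button byte from the end of the report (len - 26 with MAP_SHARE_OFFSET, len - 18 otherwise) keeps the lookup correct across the differing report lengths these controllers emit, where the old hard-coded data[22] only matched one layout. A toy of the offset selection; the report lengths below are illustrative, not taken from the diff:

#include <stdio.h>

#define MAP_SHARE_OFFSET 0x40   /* BIT(6), as in the driver */

static unsigned int share_byte(unsigned int len, unsigned int mapping)
{
        return (mapping & MAP_SHARE_OFFSET) ? len - 26 : len - 18;
}

int main(void)
{
        printf("48-byte report, offset mapping: byte %u\n",
               share_byte(48, MAP_SHARE_OFFSET));
        printf("36-byte report, default:       byte %u\n",
               share_byte(36, 0));
        return 0;
}

Note that a 48-byte report with the offset variant lands back on byte 22, the previously hard-coded index.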
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 5ad6be914160..061d48350df6 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
u32 value, mask;
int error;
- kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
- kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
+ kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX];
+ kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX];
error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
&long_press_debounce);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index aba57abe6978..b3a1f7a3acc3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
+ "DLL060d", /* Dell Precision M3800 */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN0049", /* Yoga 11e */
@@ -189,11 +190,15 @@ static const char * const smbus_pnp_ids[] = {
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"LEN2068", /* T14 Gen 1 */
+ "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */
+ "SYN3003", /* HP EliteBook 850 G1 */
"SYN3015", /* HP EliteBook 840 G2 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
+ "TOS01f6", /* Dynabook Portege X30L-G */
+ "TOS0213", /* Dynabook Portege X30-D */
NULL
};
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index eafe5a9b8964..071b7c9bf566 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
int rc;
SET_CMD_REPORT_TYPE(cmd[0], 0);
- SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP);
+ SET_CMD_REPORT_ID(cmd[0], state);
SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
@@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
ts->input->phys = ts->phys;
input_set_drvdata(ts->input, ts);
- /* Reset the gpio to be in a reset state */
+ /* Assert the reset gpio to put the device in a reset state */
ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
dev_err(dev, "Failed to request reset gpio, error %d\n", error);
return error;
}
+
+ fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */
+
gpiod_set_value_cansleep(ts->reset_gpio, 0);
/* Need a delay to have device up */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index cb536d372b12..fb82f8035c0f 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3677,6 +3677,14 @@ found:
while (*uid == '0' && *(uid + 1))
uid++;
+ if (strlen(hid) >= ACPIHID_HID_LEN) {
+ pr_err("Invalid command line: hid is too long\n");
+ return 1;
+ } else if (strlen(uid) >= ACPIHID_UID_LEN) {
+ pr_err("Invalid command line: uid is too long\n");
+ return 1;
+ }
+
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
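The added strlen() checks close an overflow: hid and uid come from the kernel command line, and the memcpy() calls below them copy strlen() bytes into fixed-size arrays. A standalone sketch of the guard, with buffer sizes as stand-ins for ACPIHID_HID_LEN/ACPIHID_UID_LEN:

#include <stdio.h>
#include <string.h>

#define HID_LEN 9       /* stand-in for ACPIHID_HID_LEN */
#define UID_LEN 256     /* stand-in for ACPIHID_UID_LEN */

static char hid_buf[HID_LEN], uid_buf[UID_LEN];

static int map_add(const char *hid, const char *uid)
{
        if (strlen(hid) >= HID_LEN || strlen(uid) >= UID_LEN)
                return -1;      /* would overflow the fixed buffers */
        memcpy(hid_buf, hid, strlen(hid));
        memcpy(uid_buf, uid, strlen(uid));
        return 0;
}

int main(void)
{
        printf("%d\n", map_add("AMDI0020", "0"));       /*  0: fits */
        printf("%d\n", map_add("WAYTOOLONGHID", "0"));  /* -1: rejected */
        return 0;
}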
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index c616de2c5926..a56a27396305 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -254,7 +254,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
iova, map_size, gfp, &updated);
if (!pte) {
- ret = -EINVAL;
+ ret = -ENOMEM;
goto out;
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cd5116d8c3b2..b3a01b7757ee 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3850,7 +3850,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
* we should not modify the IRTE
*/
if (!dev_data || !dev_data->use_vapic)
- return 0;
+ return -EINVAL;
ir_data->cfg = irqd_cfg(data);
pi_data->ir_data = ir_data;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 5aa2e7af58b4..34a0be59cd91 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -43,6 +43,8 @@ static void arm_smmu_make_nested_cd_table_ste(
target->data[0] |= nested_domain->ste[0] &
~cpu_to_le64(STRTAB_STE_0_CFG);
target->data[1] |= nested_domain->ste[1];
+ /* Merge events for DoS mitigations on eventq */
+ target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}
/*
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 9ba596430e7c..980cc6b33c43 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -411,6 +411,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
return ERR_CAST(smmu_domain);
smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+
+ /*
+ * Choose page_size as the leaf page size for invalidation when
+ * ARM_SMMU_FEAT_RANGE_INV is present
+ */
+ smmu_domain->domain.pgsize_bitmap = PAGE_SIZE;
smmu_domain->smmu = smmu;
ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 358072b4e293..e495334d1c43 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1052,7 +1052,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
- STRTAB_STE_1_EATS);
+ STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
/*
@@ -1068,7 +1068,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
if (cfg & BIT(1)) {
used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S2FWB | STRTAB_STE_1_EATS |
- STRTAB_STE_1_SHCFG);
+ STRTAB_STE_1_SHCFG | STRTAB_STE_1_MEV);
used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
@@ -3364,6 +3364,7 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
struct arm_smmu_stream *new_stream = &master->streams[i];
+ struct rb_node *existing;
u32 sid = fwspec->ids[i];
new_stream->id = sid;
@@ -3374,10 +3375,20 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
- if (rb_find_add(&new_stream->node, &smmu->streams,
- arm_smmu_streams_cmp_node)) {
- dev_warn(master->dev, "stream %u already in tree\n",
- sid);
+ existing = rb_find_add(&new_stream->node, &smmu->streams,
+ arm_smmu_streams_cmp_node);
+ if (existing) {
+ struct arm_smmu_master *existing_master =
+ rb_entry(existing, struct arm_smmu_stream, node)
+ ->master;
+
+ /* Bridged PCI devices may end up with duplicated IDs */
+ if (existing_master == master)
+ continue;
+
+ dev_warn(master->dev,
+ "stream %u already in tree from dev %s\n", sid,
+ dev_name(existing_master->dev));
ret = -EINVAL;
break;
}
@@ -4405,6 +4416,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
if (FIELD_GET(IDR3_RIL, reg))
smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+ if (FIELD_GET(IDR3_FWB, reg))
+ smmu->features |= ARM_SMMU_FEAT_S2FWB;
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index bd9d7c85576a..7290bd4c2bb0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -266,6 +266,7 @@ static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
#define STRTAB_STE_1_S1COR GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH GENMASK_ULL(7, 6)
+#define STRTAB_STE_1_MEV (1UL << 19)
#define STRTAB_STE_1_S2FWB (1UL << 25)
#define STRTAB_STE_1_S1STALLD (1UL << 27)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2a9fa0c8cc00..0f0caf59023c 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1815,7 +1815,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
- desc->iommu_cookie = NULL;
+ msi_desc_set_iommu_msi_iova(desc, 0, 0);
return 0;
}
@@ -1827,11 +1827,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
mutex_unlock(&msi_prepare_lock);
-
- msi_desc_set_iommu_cookie(desc, msi_page);
-
if (!msi_page)
return -ENOMEM;
+
+ msi_desc_set_iommu_msi_iova(
+ desc, msi_page->iova,
+ ilog2(cookie_msi_granule(domain->iova_cookie)));
return 0;
}
@@ -1842,18 +1843,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
*/
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
- struct device *dev = msi_desc_to_dev(desc);
- const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- const struct iommu_dma_msi_page *msi_page;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ if (desc->iommu_msi_shift) {
+ u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
- msi_page = msi_desc_get_iommu_cookie(desc);
-
- if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
- return;
-
- msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
- msg->address_lo += lower_32_bits(msi_page->iova);
+ msg->address_hi = upper_32_bits(msi_iova);
+ msg->address_lo = lower_32_bits(msi_iova) |
+ (msg->address_lo & ((1 << desc->iommu_msi_shift) - 1));
+ }
+#endif
}
static int iommu_dma_init(void)
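
The two dma-iommu.c hunks above replace the stashed msi_page cookie with a compact (IOVA >> shift, shift) pair in the MSI descriptor; composing the message then rebuilds the address from that pair and keeps only the intra-granule offset from the original address_lo. A small self-contained sketch of the arithmetic (a userspace model, not the kernel helpers; the example granule of 4 KiB gives shift 12):

    #include <stdint.h>
    #include <stdio.h>

    /* Compose an MSI doorbell address from a granule-based IOVA and shift,
     * preserving the intra-granule offset already present in address_lo. */
    static void compose_msi(uint64_t iova_pfn, unsigned int shift,
                            uint32_t *hi, uint32_t *lo)
    {
        uint64_t msi_iova = iova_pfn << shift;

        *hi = (uint32_t)(msi_iova >> 32);
        *lo = (uint32_t)msi_iova | (*lo & ((1u << shift) - 1));
    }

    int main(void)
    {
        uint32_t hi = 0, lo = 0x00000040;        /* offset 0x40 into the page */
        compose_msi(0x1fee00, 12, &hi, &lo);     /* 4 KiB granule -> shift 12 */
        printf("addr = %#x%08x\n", hi, lo);      /* 0x1fee00040 */
        return 0;
    }
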
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 76417bd5e926..580d24dd4edd 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3863,41 +3863,6 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int intel_iommu_enable_sva(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
-
- if (!info || dmar_disabled)
- return -EINVAL;
-
- iommu = info->iommu;
- if (!iommu)
- return -EINVAL;
-
- if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
- return -ENODEV;
-
- if (!info->pasid_enabled || !info->ats_enabled)
- return -EINVAL;
-
- /*
- * Devices having device-specific I/O fault handling should not
- * support PCI/PRI. The IOMMU side has no means to check the
- * capability of device-specific IOPF. Therefore, IOMMU can only
- * default that if the device driver enables SVA on a non-PRI
- * device, it will handle IOPF in its own way.
- */
- if (!info->pri_supported)
- return 0;
-
- /* Devices supporting PRI should have it enabled. */
- if (!info->pri_enabled)
- return -EINVAL;
-
- return 0;
-}
-
static int context_flip_pri(struct device_domain_info *info, bool enable)
{
struct intel_iommu *iommu = info->iommu;
@@ -4018,7 +3983,7 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
return intel_iommu_enable_iopf(dev);
case IOMMU_DEV_FEAT_SVA:
- return intel_iommu_enable_sva(dev);
+ return 0;
default:
return -ENODEV;
@@ -4504,6 +4469,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
+/* QM57/QS57 integrated gfx malfunctions with dmar */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
+
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
@@ -4581,7 +4549,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f5569347591f..ba93123cb4eb 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -110,6 +110,41 @@ static const struct mmu_notifier_ops intel_mmuops = {
.free_notifier = intel_mm_free_notifier,
};
+static int intel_iommu_sva_supported(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu;
+
+ if (!info || dmar_disabled)
+ return -EINVAL;
+
+ iommu = info->iommu;
+ if (!iommu)
+ return -EINVAL;
+
+ if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+ return -ENODEV;
+
+ if (!info->pasid_enabled || !info->ats_enabled)
+ return -EINVAL;
+
+ /*
+ * Devices having device-specific I/O fault handling should not
+ * support PCI/PRI. The IOMMU side has no means to check the
+ * capability of device-specific IOPF. Therefore, IOMMU can only
+ * default that if the device driver enables SVA on a non-PRI
+ * device, it will handle IOPF in its own way.
+ */
+ if (!info->pri_supported)
+ return 0;
+
+ /* Devices supporting PRI should have it enabled. */
+ if (!info->pri_enabled)
+ return -EINVAL;
+
+ return 0;
+}
+
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
@@ -121,6 +156,10 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
unsigned long sflags;
int ret = 0;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ret;
+
dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
@@ -161,6 +200,10 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
struct dmar_domain *domain;
int ret;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index de5b54eaa8bf..a5913c0b02a0 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -17,6 +17,8 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
return dev->iommu->iommu_dev->ops;
}
+void dev_iommu_free(struct device *dev);
+
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e3df1f06afbe..6e59b2c4c39b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -273,6 +273,8 @@ int iommu_device_register(struct iommu_device *iommu,
err = bus_iommu_probe(iommu_buses[i]);
if (err)
iommu_device_unregister(iommu);
+ else
+ WRITE_ONCE(iommu->ready, true);
return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
@@ -352,7 +354,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
return param;
}
-static void dev_iommu_free(struct device *dev)
+void dev_iommu_free(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
@@ -508,6 +510,9 @@ static void iommu_deinit_device(struct device *dev)
dev->iommu_group = NULL;
module_put(ops->owner);
dev_iommu_free(dev);
+#ifdef CONFIG_IOMMU_DMA
+ dev->dma_iommu = false;
+#endif
}
DEFINE_MUTEX(iommu_probe_device_lock);
@@ -2798,31 +2803,39 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode)
{
- const struct iommu_ops *ops = NULL;
- struct iommu_device *iommu;
+ const struct iommu_device *iommu, *ret = NULL;
spin_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
- ops = iommu->ops;
+ ret = iommu;
break;
}
spin_unlock(&iommu_device_lock);
- return ops;
+ return ret;
+}
+
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
+{
+ const struct iommu_device *iommu = iommu_from_fwnode(fwnode);
+
+ return iommu ? iommu->ops : NULL;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode)
{
- const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode);
+ const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!ops)
+ if (!iommu)
return driver_deferred_probe_check_state(dev);
+ if (!dev->iommu && !READ_ONCE(iommu->ready))
+ return -EPROBE_DEFER;
if (fwspec)
- return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
+ return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL;
if (!dev_iommu_get(dev))
return -ENOMEM;
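
iommu_device_register() now publishes iommu->ready only after bus probing succeeds, and iommu_fwspec_init() defers first-time consumers until that flag is visible. A compact userspace model of the publish/poll handshake, with C11 atomics standing in for WRITE_ONCE()/READ_ONCE() and -EPROBE_DEFER used as a plain retry code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517   /* kernel errno value, used here as a retry code */

    struct iommu { atomic_bool ready; };

    static int fwspec_init(struct iommu *iommu, int have_dev_iommu)
    {
        /* First-time consumers must wait until the driver finished probing. */
        if (!have_dev_iommu &&
            !atomic_load_explicit(&iommu->ready, memory_order_acquire))
            return -EPROBE_DEFER;
        return 0;
    }

    int main(void)
    {
        struct iommu smmu = { .ready = false };

        printf("before register: %d\n", fwspec_init(&smmu, 0)); /* -517 */
        atomic_store_explicit(&smmu.ready, true, memory_order_release);
        printf("after register:  %d\n", fwspec_init(&smmu, 0)); /* 0 */
        return 0;
    }
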
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 3c7800d4ab62..66a6b7466820 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -3,6 +3,7 @@
*/
#include <linux/iommu.h>
#include <linux/iommufd.h>
+#include <linux/pci-ats.h>
#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
@@ -1304,7 +1305,8 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
void *data;
int rc;
- if (cmd->flags || cmd->__reserved)
+ if (cmd->flags || cmd->__reserved[0] || cmd->__reserved[1] ||
+ cmd->__reserved[2])
return -EOPNOTSUPP;
idev = iommufd_get_device(ucmd, cmd->dev_id);
@@ -1361,6 +1363,36 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING;
+ cmd->out_max_pasid_log2 = 0;
+ /*
+ * Currently, all iommu drivers enable PASID in the probe_device()
+ * op if the iommu and device support it. So the max_pasids stored in
+ * dev->iommu indicates both PASID support and enable status. A
+ * non-zero dev->iommu->max_pasids means PASID is supported and
+ * enabled. The iommufd only reports PASID capability to userspace
+ * if it's enabled.
+ */
+ if (idev->dev->iommu->max_pasids) {
+ cmd->out_max_pasid_log2 = ilog2(idev->dev->iommu->max_pasids);
+
+ if (dev_is_pci(idev->dev)) {
+ struct pci_dev *pdev = to_pci_dev(idev->dev);
+ int ctrl;
+
+ ctrl = pci_pasid_status(pdev);
+
+ WARN_ON_ONCE(ctrl < 0 ||
+ !(ctrl & PCI_PASID_CTRL_ENABLE));
+
+ if (ctrl & PCI_PASID_CTRL_EXEC)
+ cmd->out_capabilities |=
+ IOMMU_HW_CAP_PCI_PASID_EXEC;
+ if (ctrl & PCI_PASID_CTRL_PRIV)
+ cmd->out_capabilities |=
+ IOMMU_HW_CAP_PCI_PASID_PRIV;
+ }
+ }
+
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
kfree(data);
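
The hw_info hunk above reports PASID support as ilog2(max_pasids) plus two execute/privilege capability bits derived from the PCI PASID control state. A sketch of that folding; the flag values are illustrative, not the uAPI encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define CAP_PASID_EXEC (1u << 0)   /* illustrative flag values */
    #define CAP_PASID_PRIV (1u << 1)

    static unsigned int ilog2_u32(uint32_t v)   /* floor(log2(v)), v > 0 */
    {
        unsigned int r = 0;
        while (v >>= 1) r++;
        return r;
    }

    /* Fold a device's PASID state into the hw_info reply fields. */
    static void report_pasid(uint32_t max_pasids, int exec, int priv,
                             uint8_t *max_log2, uint32_t *caps)
    {
        *max_log2 = max_pasids ? ilog2_u32(max_pasids) : 0;
        if (max_pasids && exec) *caps |= CAP_PASID_EXEC;
        if (max_pasids && priv) *caps |= CAP_PASID_PRIV;
    }

    int main(void)
    {
        uint8_t log2 = 0; uint32_t caps = 0;

        report_pasid(1 << 20, 1, 0, &log2, &caps); /* PCIe allows 2^20 PASIDs */
        printf("max_pasid_log2=%u caps=%#x\n", log2, caps); /* 20, 0x1 */
        return 0;
    }
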
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 598be26a14e2..9b5b0b852229 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -126,6 +126,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
!device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
return ERR_PTR(-EOPNOTSUPP);
+ if ((flags & IOMMU_HWPT_FAULT_ID_VALID) &&
+ (flags & IOMMU_HWPT_ALLOC_NEST_PARENT))
+ return ERR_PTR(-EOPNOTSUPP);
hwpt_paging = __iommufd_object_alloc(
ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 97987cd78da9..e10a68b5ffde 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -116,6 +116,7 @@ static void of_pci_check_device_ats(struct device *dev, struct device_node *np)
int of_iommu_configure(struct device *dev, struct device_node *master_np,
const u32 *id)
{
+ bool dev_iommu_present;
int err;
if (!master_np)
@@ -127,6 +128,7 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
mutex_unlock(&iommu_probe_device_lock);
return 0;
}
+ dev_iommu_present = dev->iommu;
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -147,8 +149,10 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
err = of_iommu_configure_device(master_np, dev, id);
}
- if (err)
+ if (err && dev_iommu_present)
iommu_fwspec_free(dev);
+ else if (err && dev->iommu)
+ dev_iommu_free(dev);
mutex_unlock(&iommu_probe_device_lock);
if (!err && dev->bus)
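
The of_iommu_configure() error path now distinguishes what this call created: if dev->iommu already existed on entry, only the fwspec is freed; otherwise the whole dev_iommu is torn down. A toy model of that ownership rule, assuming plain malloc/free in place of the kernel allocators:

    #include <stdlib.h>
    #include <stdio.h>

    struct fwspec { int ids; };
    struct dev_iommu { struct fwspec *fwspec; };

    /* On failure, release only what this configure call created: a
     * pre-existing dev_iommu survives, losing just the fwspec. */
    static void unwind(struct dev_iommu **pdi, int pre_existing, int err)
    {
        if (!err || !*pdi)
            return;
        free((*pdi)->fwspec);              /* iommu_fwspec_free() analogue */
        (*pdi)->fwspec = NULL;
        if (!pre_existing) {
            free(*pdi);                    /* dev_iommu_free() analogue */
            *pdi = NULL;
        }
    }

    int main(void)
    {
        struct dev_iommu *di = calloc(1, sizeof(*di));
        int pre_existing = (di != NULL);   /* snapshot before configuring */

        di->fwspec = calloc(1, sizeof(*di->fwspec));  /* "configure" step */
        unwind(&di, pre_existing, -1);
        printf("pre-existing dev_iommu kept: %s\n", di ? "yes" : "no");
        free(di);
        return 0;
    }
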
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..a1e370d0200f 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -423,7 +423,7 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
#ifdef CONFIG_ACPI
static int acpi_num_msi;
-static __init struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
+static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
struct v2m_data *data;
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 7942d8eb3d00..f772deb9cba5 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
+ if (pin == GPIO_NO_WAKE_IRQ)
+ return irq_domain_disconnect_hierarchy(domain, virq);
+
ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
&qcom_mpm_chip, priv);
if (ret)
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index f6363246a71a..21d01ce2da5c 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -81,17 +81,27 @@
#define ICU_PB5_TINT 0x55
/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @t_offs: TINT offset
+ */
+struct rzv2h_hw_info {
+ u16 t_offs;
+};
+
+/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
* @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +121,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -139,12 +149,20 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
tssr |= ICU_TSSR_TIEN(tssel_n);
else
tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
+
+ /*
+ * A glitch in the edge detection circuit can cause a spurious
+ * interrupt. Clear the status flag after setting the ICU_TSSRk
+ * registers, which is recommended by the hardware manual as a
+ * countermeasure.
+ */
+ writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR);
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +265,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +275,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -308,21 +326,21 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -421,7 +439,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,43 +457,41 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn)) {
- ret = PTR_ERR(resetn);
- goto put_dev;
- }
+ if (IS_ERR(resetn))
+ return PTR_ERR(resetn);
ret = reset_control_deassert(resetn);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ return ret;
}
pm_runtime_enable(&pdev->dev);
@@ -489,6 +511,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -500,12 +524,19 @@ pm_put:
pm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+};
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
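
Every TINT register access above gains a per-variant t_offs indirection so a future SoC can relocate the whole TINT block while reusing this driver. A userspace sketch of the accessor pattern; the 0x800 offset for the second variant is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Per-variant layout: the TINT register block may be relocated. */
    struct hw_info { uint16_t t_offs; };

    struct icu {
        uint8_t *base;                 /* ioremapped MMIO base in the driver */
        const struct hw_info *info;
    };

    /* All TINT accesses go through base + info->t_offs + reg. */
    static uint32_t *tint_reg(struct icu *icu, uint32_t reg)
    {
        return (uint32_t *)(icu->base + icu->info->t_offs + reg);
    }

    int main(void)
    {
        static uint8_t mmio[0x1000];                  /* fake register file */
        static const struct hw_info rzv2h = { .t_offs = 0 };
        static const struct hw_info other = { .t_offs = 0x800 }; /* hypothetical */
        struct icu a = { mmio, &rzv2h }, b = { mmio, &other };

        *tint_reg(&a, 0x24) = 1;   /* e.g. a TSCLR-style write on RZ/V2H */
        *tint_reg(&b, 0x24) = 1;   /* same write lands 0x800 higher */
        printf("offsets: %td vs %td\n",
               (uint8_t *)tint_reg(&a, 0x24) - mmio,
               (uint8_t *)tint_reg(&b, 0x24) - mmio);
        return 0;
    }
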
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..205ad61d15e4 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
};
struct aplic_idc {
- unsigned int hart_index;
+ u32 hart_index;
void __iomem *regs;
struct aplic_direct *direct;
};
@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
return 0;
}
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+ u32 *hart_index)
+{
+ const char *prop_hart_index = "riscv,hart-indexes";
+ struct device_node *np = to_of_node(dev->fwnode);
+
+ if (!np || !of_property_present(np, prop_hart_index)) {
+ *hart_index = logical_index;
+ return 0;
+ }
+
+ return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
+}
+
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu);
- idc->hart_index = i;
- idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+ if (rc) {
+ dev_warn(dev, "hart index not found for IDC%d\n", i);
+ continue;
+ }
+ idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
idc->direct = direct;
aplic_idc_set_delivery(idc, true);
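
aplic_direct_get_hart_index() falls back to the logical index whenever the optional riscv,hart-indexes property is absent. The same lookup-with-fallback shape, modeled in plain C with an array standing in for the DT property:

    #include <stdio.h>

    /* Model of an optional "riscv,hart-indexes" property: when the array
     * is absent, the hart index is simply the logical index. */
    static int get_hart_index(const unsigned int *prop, int prop_len,
                              unsigned int logical, unsigned int *out)
    {
        if (!prop) {            /* property not present: identity mapping */
            *out = logical;
            return 0;
        }
        if ((int)logical >= prop_len)
            return -1;          /* malformed DT: fewer entries than IDCs */
        *out = prop[logical];
        return 0;
    }

    int main(void)
    {
        const unsigned int idx[] = { 4, 0, 2 };   /* illustrative remapping */
        unsigned int h;

        get_hart_index(NULL, 0, 1, &h);  printf("no prop   -> %u\n", h); /* 1 */
        get_hart_index(idx, 3, 1, &h);   printf("with prop -> %u\n", h); /* 0 */
        return 0;
    }
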
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 275df5005705..553650932c75 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -77,6 +77,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for the per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -120,7 +126,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index c708780e8760..5d7c30ad8855 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -96,9 +96,8 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
- struct irq_data *pd = d->parent_data;
- old_vec = irq_data_get_irq_chip_data(pd);
+ old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
return -ENOENT;
@@ -116,13 +115,13 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
return -ENOSPC;
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
- pd->chip_data = new_vec;
+ d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */
- irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */
imsic_vector_move(old_vec, new_vec);
@@ -135,6 +134,9 @@ static struct irq_chip imsic_irq_base_chip = {
.name = "IMSIC",
.irq_mask = imsic_irq_mask,
.irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+#endif
.irq_retrigger = imsic_irq_retrigger,
.irq_compose_msi_msg = imsic_irq_compose_msg,
.flags = IRQCHIP_SKIP_SET_WAKE |
@@ -245,7 +247,7 @@ static bool imsic_init_dev_msi_info(struct device *dev,
if (WARN_ON_ONCE(domain != real_parent))
return false;
#ifdef CONFIG_SMP
- info->chip->irq_set_affinity = imsic_irq_set_affinity;
+ info->chip->irq_set_affinity = irq_chip_set_affinity_parent;
#endif
break;
default:
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index b97e6cd89ed7..06ff0e17c0c3 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
struct imsic_local_config *mlocal;
struct imsic_vector *vec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,75 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU then we can get an MSI during the movement, so check the
+ * ID pending bit and re-trigger the new ID on the other CPU using
+ * an MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
if (__imsic_id_read_clear_pending(i)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
+ WRITE_ONCE(vec->move_next, NULL);
imsic_vector_free(&lpriv->vectors[i]);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, cpu);
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv, unsigned int cpu)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv, smp_processor_id());
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +231,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +252,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv, cpu);
}
}
#else
@@ -278,8 +311,9 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +323,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +359,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -386,7 +423,8 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
vec = &lpriv->vectors[local_id];
vec->hwirq = hwirq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
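
The single vec->move pointer becomes a move_next/move_prev pair so each side of a vector move can be retired independently: the destination CPU must not clear its move_prev until the source CPU has finished and cleared move_next, retrying from the one-jiffy timer otherwise. A minimal single-threaded model of that handshake (locking and the MSI re-trigger are elided):

    #include <stdio.h>

    struct vec {
        int cpu;
        struct vec *move_next;   /* set on the old vector: where it is going */
        struct vec *move_prev;   /* set on the new vector: where it came from */
    };

    /* Start a move: link both sides, mirroring imsic_vector_move(). */
    static void move_start(struct vec *oldv, struct vec *newv)
    {
        oldv->move_next = newv;
        newv->move_prev = oldv;
    }

    /* Per-CPU sync step; returns 0 when this vector still has to wait. */
    static int local_sync(struct vec *v)
    {
        if (v->move_prev) {
            /* New side: finish only after the old CPU cleared move_next. */
            if (v->move_prev->move_next)
                return 0;               /* retry from the 1-jiffy timer */
            v->move_prev = NULL;
        }
        if (v->move_next) {
            /* Old side: hand over (re-trigger pending MSI), then detach. */
            v->move_next = NULL;
        }
        return 1;
    }

    int main(void)
    {
        struct vec a = { .cpu = 0 }, b = { .cpu = 1 };

        move_start(&a, &b);
        printf("new first: %d\n", local_sync(&b)); /* 0: old not done yet */
        printf("old:       %d\n", local_sync(&a)); /* 1: handover complete */
        printf("new again: %d\n", local_sync(&b)); /* 1: now clean */
        return 0;
    }
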
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 391e44280827..f02842b84ed5 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -23,7 +23,8 @@ struct imsic_vector {
unsigned int hwirq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,7 +88,7 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 2b27d043921c..8859e8fe292a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -971,6 +971,7 @@ config LEDS_ST1202
depends on I2C
depends on OF
select LEDS_TRIGGERS
+ select LEDS_TRIGGER_PATTERN
help
Say Y to enable support for LEDs connected to LED1202
LED driver chips accessed via the I2C bus.
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
index 4cebc0203c22..ccea216c11f9 100644
--- a/drivers/leds/leds-st1202.c
+++ b/drivers/leds/leds-st1202.c
@@ -350,11 +350,11 @@ static int st1202_probe(struct i2c_client *client)
return ret;
chip->client = client;
- ret = st1202_dt_init(chip);
+ ret = st1202_setup(chip);
if (ret < 0)
return ret;
- ret = st1202_setup(chip);
+ ret = st1202_dt_init(chip);
if (ret < 0)
return ret;
diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c
index f80a06cc31f8..1c7705bafdfc 100644
--- a/drivers/leds/rgb/leds-pwm-multicolor.c
+++ b/drivers/leds/rgb/leds-pwm-multicolor.c
@@ -141,8 +141,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev)
/* init the multicolor's LED class device */
cdev = &priv->mc_cdev.led_cdev;
- fwnode_property_read_u32(mcnode, "max-brightness",
+ ret = fwnode_property_read_u32(mcnode, "max-brightness",
&cdev->max_brightness);
+ if (ret)
+ goto release_mcnode;
+
cdev->flags = LED_CORE_SUSPENDRESUME;
cdev->brightness_set_blocking = led_pwm_mc_set;
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index c15efe3e5078..4e048e08c4fd 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -68,6 +68,7 @@ struct led_netdev_data {
unsigned int last_activity;
unsigned long mode;
+ unsigned long blink_delay;
int link_speed;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_link_modes);
u8 duplex;
@@ -86,6 +87,10 @@ static void set_baseline_state(struct led_netdev_data *trigger_data)
/* Already validated, hw control is possible with the requested mode */
if (trigger_data->hw_control) {
led_cdev->hw_control_set(led_cdev, trigger_data->mode);
+ if (led_cdev->blink_set) {
+ led_cdev->blink_set(led_cdev, &trigger_data->blink_delay,
+ &trigger_data->blink_delay);
+ }
return;
}
@@ -454,10 +459,11 @@ static ssize_t interval_store(struct device *dev,
size_t size)
{
struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev);
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
unsigned long value;
int ret;
- if (trigger_data->hw_control)
+ if (trigger_data->hw_control && !led_cdev->blink_set)
return -EINVAL;
ret = kstrtoul(buf, 0, &value);
@@ -466,9 +472,13 @@ static ssize_t interval_store(struct device *dev,
/* impose some basic bounds on the timer interval */
if (value >= 5 && value <= 10000) {
- cancel_delayed_work_sync(&trigger_data->work);
+ if (trigger_data->hw_control) {
+ trigger_data->blink_delay = value;
+ } else {
+ cancel_delayed_work_sync(&trigger_data->work);
- atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ }
set_baseline_state(trigger_data); /* resets timer */
}
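
interval_store() now keeps working under hardware control when the LED class device provides blink_set(): the value is remembered as blink_delay and pushed to the hardware from set_baseline_state() instead of arming the software timer. A sketch of that branching, with an illustrative HZ for the msecs-to-jiffies conversion:

    #include <stdio.h>

    #define HZ 100    /* illustrative tick rate for msecs -> jiffies */

    struct trig {
        int hw_control;
        int have_blink_set;              /* LED offers ->blink_set() */
        unsigned long blink_delay;       /* ms, handed to hardware blinking */
        unsigned long interval_jiffies;  /* software timer period */
    };

    static int interval_store(struct trig *t, unsigned long ms)
    {
        if (t->hw_control && !t->have_blink_set)
            return -1;                  /* hardware owns its own timing */
        if (ms < 5 || ms > 10000)
            return 0;                   /* out of bounds: ignored */
        if (t->hw_control)
            t->blink_delay = ms;        /* forwarded via ->blink_set() */
        else
            t->interval_jiffies = ms * HZ / 1000;
        return 0;
    }

    int main(void)
    {
        struct trig t = { .hw_control = 1, .have_blink_set = 1 };

        interval_store(&t, 500);
        printf("hw blink_delay = %lu ms\n", t.blink_delay);
        return 0;
    }
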
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..cb174e788a96 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -415,11 +415,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
mutex_lock(&con_mutex);
- if (of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", index, &spec)) {
+ ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
+ index, &spec);
+ if (ret) {
dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
mutex_unlock(&con_mutex);
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(ret);
}
chan = ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 82102a4c5d68..49254d99a8ad 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -313,6 +313,10 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
int ret;
pchan = chan->con_priv;
+
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
+
if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
!pchan->chan_in_use)
return IRQ_NONE;
@@ -330,13 +334,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
return IRQ_NONE;
}
- if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
- return IRQ_NONE;
-
+ /*
+ * Clear this flag after updating the interrupt ack register and just
+ * before mbox_chan_received_data(), which might call pcc_send_data()
+ * where the flag is set again to start a new transfer. This is
+ * required to avoid any possible race in the update of this flag.
+ */
+ pchan->chan_in_use = false;
mbox_chan_received_data(chan, NULL);
check_and_ack(pchan, chan);
- pchan->chan_in_use = false;
return IRQ_HANDLED;
}
@@ -412,8 +419,12 @@ int pcc_mbox_ioremap(struct mbox_chan *chan)
return -1;
pchan_info = chan->con_priv;
pcc_mbox_chan = &pchan_info->chan;
- pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
- pcc_mbox_chan->shmem_size);
+
+ pcc_mbox_chan->shmem = acpi_os_ioremap(pcc_mbox_chan->shmem_base_addr,
+ pcc_mbox_chan->shmem_size);
+ if (!pcc_mbox_chan->shmem)
+ return -ENXIO;
+
return 0;
}
EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
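
The reordering above is subtle: mbox_chan_received_data() can call back into pcc_send_data(), which sets chan_in_use for the next transfer, so the flag must be cleared before the callback runs or the new transfer's state would be wiped. A tiny sequential model showing why the order matters:

    #include <stdio.h>

    static int chan_in_use;

    /* The rx callback may immediately start the next transfer, which sets
     * chan_in_use again; clearing it afterwards would wipe that state. */
    static void send_data(void)   { chan_in_use = 1; }
    static void rx_callback(void) { send_data(); }    /* client re-arms */

    static void irq_handler(void)
    {
        chan_in_use = 0;   /* clear BEFORE the callback, as the fix does */
        rx_callback();
    }

    int main(void)
    {
        send_data();                 /* a transfer is in flight */
        irq_handler();
        printf("chan_in_use = %d\n", chan_in_use);  /* 1: new transfer kept */
        return 0;
    }
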
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 02a680c73979..bf0d7d58c8b0 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -96,7 +96,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
ret = mcb_device_register(bus, mdev);
if (ret < 0)
- goto err;
+ return ret;
return 0;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index aab8240429b0..debc533a0365 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -68,6 +68,8 @@
#define LIST_DIRTY 1
#define LIST_SIZE 2
+#define SCAN_RESCHED_CYCLE 16
+
/*--------------------------------------------------------------*/
/*
@@ -2426,7 +2428,12 @@ static void __scan(struct dm_bufio_client *c)
atomic_long_dec(&c->need_shrink);
freed++;
- cond_resched();
+
+ if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
+ dm_bufio_unlock(c);
+ cond_resched();
+ dm_bufio_lock(c);
+ }
}
}
}
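
__scan() previously called cond_resched() while still holding the bufio lock, where it cannot let waiters in; the fix drops and retakes the lock every SCAN_RESCHED_CYCLE frees. A userspace analogue using a pthread mutex and sched_yield() (compile with -pthread):

    #include <stdio.h>
    #include <pthread.h>
    #include <sched.h>

    #define SCAN_RESCHED_CYCLE 16

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Long scan under a lock: briefly release it every N items so other
     * waiters (and the scheduler) get a chance to run. */
    static void scan(int nr_items)
    {
        long freed = 0;

        pthread_mutex_lock(&lock);
        for (int i = 0; i < nr_items; i++) {
            freed++;                       /* stand-in for freeing a buffer */
            if (freed % SCAN_RESCHED_CYCLE == 0) {
                pthread_mutex_unlock(&lock);
                sched_yield();             /* cond_resched() analogue */
                pthread_mutex_lock(&lock);
            }
        }
        pthread_mutex_unlock(&lock);
        printf("freed %ld\n", freed);
    }

    int main(void) { scan(100); return 0; }
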
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9cb797a561d6..6cee5eac8b9e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2899,6 +2899,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
return to_cblock(size);
}
+static bool can_resume(struct cache *cache)
+{
+ /*
+ * Disallow retrying the resume operation for devices that failed the
+ * first resume attempt, as the failure leaves the policy object partially
+ * initialized. Retrying could trigger a BUG_ON when loading cache mappings
+ * into the incomplete policy object.
+ */
+ if (cache->sized && !cache->loaded_mappings) {
+ if (get_cache_mode(cache) != CM_WRITE)
+ DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
+ cache_device_name(cache));
+ else
+ DMERR("%s: unable to resume cache due to missing proper cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+
+ return true;
+}
+
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@@ -2947,6 +2968,9 @@ static int cache_preresume(struct dm_target *ti)
struct cache *cache = ti->private;
dm_cblock_t csize = get_cache_dev_size(cache);
+ if (!can_resume(cache))
+ return -EINVAL;
+
/*
* Check to see if the cache has resized.
*/
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 65ab609ac0cb..9947962e80f2 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -5176,7 +5176,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
- if (ic->mode == 'B')
+ if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0ef5203387b2..4752966fdb3f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
- if (argv && old_argv) {
- memcpy(argv, old_argv, *size * sizeof(*argv));
+ if (argv) {
+ if (old_argv)
+ memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
@@ -697,6 +698,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
+ if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
+ DMERR("%s: too large device", dm_device_name(t->md));
+ return -EINVAL;
+ }
ti->type = dm_get_target_type(type);
if (!ti->type) {
@@ -1177,7 +1182,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
t = dm_get_live_table(md, &srcu_idx);
if (!t)
- return 0;
+ goto put_live_table;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1188,6 +1193,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
(void *)key);
}
+put_live_table:
dm_put_live_table(md, srcu_idx);
return 0;
}
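
The realloc_argv() fix is a classic grow-array pitfall: the element count must be updated on every successful allocation, including the very first one where old_argv is NULL, or a later grow copies with a stale size. A standalone illustration, assuming malloc/free in place of the kernel allocators:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Grow a pointer array. The subtle point fixed above: *size must be
     * updated even when there was no old array to copy from. */
    static char **grow(unsigned int *size, char **old)
    {
        unsigned int new_size = *size ? *size * 2 : 8;
        char **p = malloc(new_size * sizeof(*p));

        if (p) {
            if (old)
                memcpy(p, old, *size * sizeof(*p));
            *size = new_size;       /* unconditionally, as in the fix */
        }
        free(old);
        return p;
    }

    int main(void)
    {
        unsigned int size = 0;
        char **argv = grow(&size, NULL);

        printf("first grow:  size=%u\n", size);   /* 8, not 0 */
        argv = grow(&size, argv);
        printf("second grow: size=%u\n", size);   /* 16 */
        free(argv);
        return 0;
    }
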
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
index af8fab83b0f3..61edf2b72427 100644
--- a/drivers/md/dm-vdo/indexer/index-layout.c
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -54,7 +54,6 @@
* Each save also has a unique nonce.
*/
-#define MAGIC_SIZE 32
#define NONCE_INFO_SIZE 32
#define MAX_SAVES 2
@@ -98,9 +97,11 @@ enum region_type {
#define SUPER_VERSION_CURRENT 3
#define SUPER_VERSION_MAXIMUM 7
-static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)
+
struct region_header {
u64 magic;
u64 region_blocks;
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index a7e32baab4af..80b608674022 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -31,9 +31,7 @@
#include <linux/completion.h>
#include <linux/device-mapper.h>
-#include <linux/kernel.h>
#include <linux/lz4.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -142,12 +140,6 @@ static void finish_vdo_request_queue(void *ptr)
vdo_unregister_allocating_thread();
}
-#ifdef MODULE
-#define MODULE_NAME THIS_MODULE->name
-#else
-#define MODULE_NAME "dm-vdo"
-#endif /* MODULE */
-
static const struct vdo_work_queue_type default_queue_type = {
.start = start_vdo_request_queue,
.finish = finish_vdo_request_queue,
@@ -559,8 +551,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason,
*vdo_ptr = vdo;
snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
- "%s%u", MODULE_NAME, instance);
- BUG_ON(vdo->thread_name_prefix[0] == '\0');
+ "vdo%u", instance);
result = vdo_allocate(vdo->thread_config.thread_count,
struct vdo_thread, __func__, &vdo->threads);
if (result != VDO_SUCCESS) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4d1e42891d24..5ab7574c0c76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1540,14 +1540,18 @@ static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
+ blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+
+ if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
+ (REQ_IDLE | REQ_SYNC))
+ opf |= REQ_IDLE;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
- REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio;
ci->sector_count = 0;
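
__send_empty_flush() now inherits REQ_IDLE into the on-stack flush bio, but only when the original bio carried both REQ_IDLE and REQ_SYNC. A sketch of the flag propagation; the bit values below are illustrative, not the real blk_opf_t encoding:

    #include <stdio.h>

    #define REQ_SYNC     (1u << 0)   /* illustrative bit positions */
    #define REQ_IDLE     (1u << 1)
    #define REQ_PREFLUSH (1u << 2)
    #define REQ_OP_WRITE (1u << 3)

    /* Build the flush bio's opf, inheriting the submitter's idle hint
     * only when the original bio was both sync and idle. */
    static unsigned int flush_opf(unsigned int orig_opf)
    {
        unsigned int opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

        if ((orig_opf & (REQ_IDLE | REQ_SYNC)) == (REQ_IDLE | REQ_SYNC))
            opf |= REQ_IDLE;
        return opf;
    }

    int main(void)
    {
        printf("%#x\n", flush_opf(REQ_SYNC | REQ_IDLE)); /* includes REQ_IDLE */
        printf("%#x\n", flush_opf(REQ_SYNC));            /* does not */
        return 0;
    }
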
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 15829ab192d2..7373dff023d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2199,14 +2199,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (!rdev_set_badblocks(rdev, sect, s, 0))
abort = 1;
}
- if (abort) {
- conf->recovery_disabled =
- mddev->recovery_disabled;
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
+ if (abort)
return 0;
- }
+
/* Try next page */
sectors -= s;
sect += s;
@@ -2345,10 +2340,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
int disks = conf->raid_disks * 2;
struct bio *wbio;
- if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- /* ouch - failed to read all of that. */
- if (!fix_sync_read_error(r1_bio))
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ /*
+ * ouch - failed to read all of that.
+ * No need to fix a read error for check/repair
+ * because all member disks are read.
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
+ !fix_sync_read_error(r1_bio)) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
return;
+ }
+ }
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
process_checks(r1_bio);
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index a70451d99ebc..f232c3df7ee1 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -873,19 +873,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -1020,13 +1020,12 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, usecs);
- hrtimer_forward_now(timer,
- ns_to_ktime(usecs * 1000));
+ hrtimer_forward_now(timer, us_to_ktime(usecs));
return HRTIMER_RESTART;
}
pin->wait_usecs = usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 8ba096b8ebca..85ecb2aeefdb 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -140,6 +140,7 @@ config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB
select REGMAP_I2C
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index ff7dfa0278a7..6e50b14f888f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -195,6 +195,7 @@ struct adv7180_state;
#define ADV7180_FLAG_V2 BIT(1)
#define ADV7180_FLAG_MIPI_CSI2 BIT(2)
#define ADV7180_FLAG_I2P BIT(3)
+#define ADV7180_FLAG_TEST_PATTERN BIT(4)
struct adv7180_chip_info {
unsigned int flags;
@@ -682,11 +683,15 @@ static int adv7180_init_controls(struct adv7180_state *state)
ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
- v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops,
- V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(test_pattern_menu) - 1,
- 0, ARRAY_SIZE(test_pattern_menu) - 1,
- test_pattern_menu);
+ if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) {
+ v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl,
+ &adv7180_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ test_pattern_menu);
+ }
state->sd.ctrl_handler = &state->ctrl_hdl;
if (state->ctrl_hdl.error) {
@@ -1221,7 +1226,7 @@ static const struct adv7180_chip_info adv7182_info = {
};
static const struct adv7180_chip_info adv7280_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1235,7 +1240,8 @@ static const struct adv7180_chip_info adv7280_info = {
};
static const struct adv7180_chip_info adv7280_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1256,7 +1262,8 @@ static const struct adv7180_chip_info adv7280_m_info = {
};
static const struct adv7180_chip_info adv7281_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1271,7 +1278,8 @@ static const struct adv7180_chip_info adv7281_info = {
};
static const struct adv7180_chip_info adv7281_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1291,7 +1299,8 @@ static const struct adv7180_chip_info adv7281_m_info = {
};
static const struct adv7180_chip_info adv7281_ma_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1316,7 +1325,7 @@ static const struct adv7180_chip_info adv7281_ma_info = {
};
static const struct adv7180_chip_info adv7282_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1331,7 +1340,8 @@ static const struct adv7180_chip_info adv7282_info = {
};
static const struct adv7180_chip_info adv7282_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 6a393e18267f..ea5e294327e7 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -15,26 +15,152 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define IMX214_REG_MODE_SELECT 0x0100
+#define IMX214_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX214_MODE_STANDBY 0x00
#define IMX214_MODE_STREAMING 0x01
+#define IMX214_REG_FAST_STANDBY_CTRL CCI_REG8(0x0106)
+
#define IMX214_DEFAULT_CLK_FREQ 24000000
-#define IMX214_DEFAULT_LINK_FREQ 480000000
+#define IMX214_DEFAULT_LINK_FREQ 600000000
+/* Keep the wrong link frequency for backward compatibility */
+#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+/* V-TIMING internal */
+#define IMX214_REG_FRM_LENGTH_LINES CCI_REG16(0x0340)
+
/* Exposure control */
-#define IMX214_REG_EXPOSURE 0x0202
+#define IMX214_REG_EXPOSURE CCI_REG16(0x0202)
#define IMX214_EXPOSURE_MIN 0
#define IMX214_EXPOSURE_MAX 3184
#define IMX214_EXPOSURE_STEP 1
#define IMX214_EXPOSURE_DEFAULT 3184
+#define IMX214_REG_EXPOSURE_RATIO CCI_REG8(0x0222)
+#define IMX214_REG_SHORT_EXPOSURE CCI_REG16(0x0224)
+
+/* Analog gain control */
+#define IMX214_REG_ANALOG_GAIN CCI_REG16(0x0204)
+#define IMX214_REG_SHORT_ANALOG_GAIN CCI_REG16(0x0216)
+
+/* Digital gain control */
+#define IMX214_REG_DIG_GAIN_GREENR CCI_REG16(0x020e)
+#define IMX214_REG_DIG_GAIN_RED CCI_REG16(0x0210)
+#define IMX214_REG_DIG_GAIN_BLUE CCI_REG16(0x0212)
+#define IMX214_REG_DIG_GAIN_GREENB CCI_REG16(0x0214)
+
+#define IMX214_REG_ORIENTATION CCI_REG8(0x0101)
+
+#define IMX214_REG_MASK_CORR_FRAMES CCI_REG8(0x0105)
+#define IMX214_CORR_FRAMES_TRANSMIT 0
+#define IMX214_CORR_FRAMES_MASK 1
+
+#define IMX214_REG_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define IMX214_CSI_DATA_FORMAT_RAW8 0x0808
+#define IMX214_CSI_DATA_FORMAT_RAW10 0x0A0A
+#define IMX214_CSI_DATA_FORMAT_COMP6 0x0A06
+#define IMX214_CSI_DATA_FORMAT_COMP8 0x0A08
+
+#define IMX214_REG_CSI_LANE_MODE CCI_REG8(0x0114)
+#define IMX214_CSI_2_LANE_MODE 1
+#define IMX214_CSI_4_LANE_MODE 3
+
+#define IMX214_REG_EXCK_FREQ CCI_REG16(0x0136)
+#define IMX214_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+#define IMX214_REG_TEMP_SENSOR_CONTROL CCI_REG8(0x0138)
+
+#define IMX214_REG_HDR_MODE CCI_REG8(0x0220)
+#define IMX214_HDR_MODE_OFF 0
+#define IMX214_HDR_MODE_ON 1
+
+#define IMX214_REG_HDR_RES_REDUCTION CCI_REG8(0x0221)
+#define IMX214_HDR_RES_REDU_THROUGH 0x11
+#define IMX214_HDR_RES_REDU_2_BINNING 0x22
+
+/* PLL settings */
+#define IMX214_REG_VTPXCK_DIV CCI_REG8(0x0301)
+#define IMX214_REG_VTSYCK_DIV CCI_REG8(0x0303)
+#define IMX214_REG_PREPLLCK_VT_DIV CCI_REG8(0x0305)
+#define IMX214_REG_PLL_VT_MPY CCI_REG16(0x0306)
+#define IMX214_REG_OPPXCK_DIV CCI_REG8(0x0309)
+#define IMX214_REG_OPSYCK_DIV CCI_REG8(0x030b)
+#define IMX214_REG_PLL_MULT_DRIV CCI_REG8(0x0310)
+#define IMX214_PLL_SINGLE 0
+#define IMX214_PLL_DUAL 1
+
+#define IMX214_REG_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define IMX214_REG_X_ADD_STA CCI_REG16(0x0344)
+#define IMX214_REG_Y_ADD_STA CCI_REG16(0x0346)
+#define IMX214_REG_X_ADD_END CCI_REG16(0x0348)
+#define IMX214_REG_Y_ADD_END CCI_REG16(0x034a)
+#define IMX214_REG_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define IMX214_REG_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define IMX214_REG_X_EVEN_INC CCI_REG8(0x0381)
+#define IMX214_REG_X_ODD_INC CCI_REG8(0x0383)
+#define IMX214_REG_Y_EVEN_INC CCI_REG8(0x0385)
+#define IMX214_REG_Y_ODD_INC CCI_REG8(0x0387)
+
+#define IMX214_REG_SCALE_MODE CCI_REG8(0x0401)
+#define IMX214_SCALE_NONE 0
+#define IMX214_SCALE_HORIZONTAL 1
+#define IMX214_SCALE_FULL 2
+#define IMX214_REG_SCALE_M CCI_REG16(0x0404)
+
+#define IMX214_REG_DIG_CROP_X_OFFSET CCI_REG16(0x0408)
+#define IMX214_REG_DIG_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define IMX214_REG_DIG_CROP_WIDTH CCI_REG16(0x040c)
+#define IMX214_REG_DIG_CROP_HEIGHT CCI_REG16(0x040e)
+
+#define IMX214_REG_REQ_LINK_BIT_RATE CCI_REG32(0x0820)
+#define IMX214_LINK_BIT_RATE_MBPS(n) ((n) << 16)
+
+/* Binning mode */
+#define IMX214_REG_BINNING_MODE CCI_REG8(0x0900)
+#define IMX214_BINNING_NONE 0
+#define IMX214_BINNING_ENABLE 1
+#define IMX214_REG_BINNING_TYPE CCI_REG8(0x0901)
+#define IMX214_REG_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define IMX214_BINNING_AVERAGE 0x00
+#define IMX214_BINNING_SUMMED 0x01
+#define IMX214_BINNING_BAYER 0x02
+
+#define IMX214_REG_SING_DEF_CORR_EN CCI_REG8(0x0b06)
+#define IMX214_SING_DEF_CORR_OFF 0
+#define IMX214_SING_DEF_CORR_ON 1
+
+/* AWB control */
+#define IMX214_REG_ABS_GAIN_GREENR CCI_REG16(0x0b8e)
+#define IMX214_REG_ABS_GAIN_RED CCI_REG16(0x0b90)
+#define IMX214_REG_ABS_GAIN_BLUE CCI_REG16(0x0b92)
+#define IMX214_REG_ABS_GAIN_GREENB CCI_REG16(0x0b94)
+
+#define IMX214_REG_RMSC_NR_MODE CCI_REG8(0x3001)
+#define IMX214_REG_STATS_OUT_EN CCI_REG8(0x3013)
+#define IMX214_STATS_OUT_OFF 0
+#define IMX214_STATS_OUT_ON 1
+
+/* Chroma noise reduction */
+#define IMX214_REG_NML_NR_EN CCI_REG8(0x30a2)
+#define IMX214_NML_NR_OFF 0
+#define IMX214_NML_NR_ON 1
+
+#define IMX214_REG_EBD_SIZE_V CCI_REG8(0x5041)
+#define IMX214_EBD_NO 0
+#define IMX214_EBD_4_LINE 4
+
+#define IMX214_REG_RG_STATS_LMT CCI_REG16(0x6d12)
+#define IMX214_RG_STATS_LMT_10_BIT 0x03FF
+#define IMX214_RG_STATS_LMT_14_BIT 0x3FFF
+
+#define IMX214_REG_ATR_FAST_MOVE CCI_REG8(0x9300)
/* IMX214 native and active pixel array size */
#define IMX214_NATIVE_WIDTH 4224U
@@ -59,8 +185,6 @@ struct imx214 {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixel_rate;
@@ -71,353 +195,266 @@ struct imx214 {
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
struct gpio_desc *enable_gpio;
-
- /*
- * Serialize control access, get/set format, get selection
- * and start streaming.
- */
- struct mutex mutex;
-};
-
-struct reg_8 {
- u16 addr;
- u8 val;
-};
-
-enum {
- IMX214_TABLE_WAIT_MS = 0,
- IMX214_TABLE_END,
- IMX214_MAX_RETRIES,
- IMX214_WAIT_MS
};
/*From imx214_mode_tbls.h*/
-static const struct reg_8 mode_4096x2304[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x00},
- {0x0345, 0x38},
- {0x0346, 0x01},
- {0x0347, 0x98},
- {0x0348, 0x10},
- {0x0349, 0x37},
- {0x034A, 0x0A},
- {0x034B, 0x97},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x10},
- {0x034D, 0x00},
- {0x034E, 0x09},
- {0x034F, 0x00},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x10},
- {0x040D, 0x00},
- {0x040E, 0x09},
- {0x040F, 0x00},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x09},
- {0x3A04, 0x50},
- {0x3A05, 0x01},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_4096x2304[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_FRM_LENGTH_LINES, 3194 },
+ { IMX214_REG_LINE_LENGTH_PCK, 5008 },
+ { IMX214_REG_X_ADD_STA, 56 },
+ { IMX214_REG_Y_ADD_STA, 408 },
+ { IMX214_REG_X_ADD_END, 4151 },
+ { IMX214_REG_Y_ADD_END, 2711 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 4096 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 2304 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 4096 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 2304 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x09 },
+ { CCI_REG8(0x3A04), 0x50 },
+ { CCI_REG8(0x3A05), 0x01 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { IMX214_REG_ANALOG_GAIN, 0 },
+ { IMX214_REG_DIG_GAIN_GREENR, 256 },
+ { IMX214_REG_DIG_GAIN_RED, 256 },
+ { IMX214_REG_DIG_GAIN_BLUE, 256 },
+ { IMX214_REG_DIG_GAIN_GREENB, 256 },
+ { IMX214_REG_SHORT_ANALOG_GAIN, 0 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_1920x1080[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x04},
- {0x0345, 0x78},
- {0x0346, 0x03},
- {0x0347, 0xFC},
- {0x0348, 0x0B},
- {0x0349, 0xF7},
- {0x034A, 0x08},
- {0x034B, 0x33},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x07},
- {0x034D, 0x80},
- {0x034E, 0x04},
- {0x034F, 0x38},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x07},
- {0x040D, 0x80},
- {0x040E, 0x04},
- {0x040F, 0x38},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x04},
- {0x3A04, 0xF8},
- {0x3A05, 0x02},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_1920x1080[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_FRM_LENGTH_LINES, 3194 },
+ { IMX214_REG_LINE_LENGTH_PCK, 5008 },
+ { IMX214_REG_X_ADD_STA, 1144 },
+ { IMX214_REG_Y_ADD_STA, 1020 },
+ { IMX214_REG_X_ADD_END, 3063 },
+ { IMX214_REG_Y_ADD_END, 2099 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 1920 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 1080 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 1920 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 1080 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x04 },
+ { CCI_REG8(0x3A04), 0xF8 },
+ { CCI_REG8(0x3A05), 0x02 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_EXPOSURE, IMX214_EXPOSURE_DEFAULT },
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { IMX214_REG_ANALOG_GAIN, 0 },
+ { IMX214_REG_DIG_GAIN_GREENR, 256 },
+ { IMX214_REG_DIG_GAIN_RED, 256 },
+ { IMX214_REG_DIG_GAIN_BLUE, 256 },
+ { IMX214_REG_DIG_GAIN_GREENB, 256 },
+ { IMX214_REG_SHORT_ANALOG_GAIN, 0 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_table_common[] = {
+static const struct cci_reg_sequence mode_table_common[] = {
/* software reset */
/* software standby settings */
- {0x0100, 0x00},
+ { IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY },
/* ATR setting */
- {0x9300, 0x02},
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* external clock setting */
- {0x0136, 0x18},
- {0x0137, 0x00},
+ { IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
/* global setting */
/* basic config */
- {0x0101, 0x00},
- {0x0105, 0x01},
- {0x0106, 0x01},
- {0x4550, 0x02},
- {0x4601, 0x00},
- {0x4642, 0x05},
- {0x6227, 0x11},
- {0x6276, 0x00},
- {0x900E, 0x06},
- {0xA802, 0x90},
- {0xA803, 0x11},
- {0xA804, 0x62},
- {0xA805, 0x77},
- {0xA806, 0xAE},
- {0xA807, 0x34},
- {0xA808, 0xAE},
- {0xA809, 0x35},
- {0xA80A, 0x62},
- {0xA80B, 0x83},
- {0xAE33, 0x00},
+ { IMX214_REG_ORIENTATION, 0 },
+ { IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
+ { IMX214_REG_FAST_STANDBY_CTRL, 1 },
+ { CCI_REG8(0x4550), 0x02 },
+ { CCI_REG8(0x4601), 0x00 },
+ { CCI_REG8(0x4642), 0x05 },
+ { CCI_REG8(0x6227), 0x11 },
+ { CCI_REG8(0x6276), 0x00 },
+ { CCI_REG8(0x900E), 0x06 },
+ { CCI_REG8(0xA802), 0x90 },
+ { CCI_REG8(0xA803), 0x11 },
+ { CCI_REG8(0xA804), 0x62 },
+ { CCI_REG8(0xA805), 0x77 },
+ { CCI_REG8(0xA806), 0xAE },
+ { CCI_REG8(0xA807), 0x34 },
+ { CCI_REG8(0xA808), 0xAE },
+ { CCI_REG8(0xA809), 0x35 },
+ { CCI_REG8(0xA80A), 0x62 },
+ { CCI_REG8(0xA80B), 0x83 },
+ { CCI_REG8(0xAE33), 0x00 },
/* analog setting */
- {0x4174, 0x00},
- {0x4175, 0x11},
- {0x4612, 0x29},
- {0x461B, 0x12},
- {0x461F, 0x06},
- {0x4635, 0x07},
- {0x4637, 0x30},
- {0x463F, 0x18},
- {0x4641, 0x0D},
- {0x465B, 0x12},
- {0x465F, 0x11},
- {0x4663, 0x11},
- {0x4667, 0x0F},
- {0x466F, 0x0F},
- {0x470E, 0x09},
- {0x4909, 0xAB},
- {0x490B, 0x95},
- {0x4915, 0x5D},
- {0x4A5F, 0xFF},
- {0x4A61, 0xFF},
- {0x4A73, 0x62},
- {0x4A85, 0x00},
- {0x4A87, 0xFF},
+ { CCI_REG8(0x4174), 0x00 },
+ { CCI_REG8(0x4175), 0x11 },
+ { CCI_REG8(0x4612), 0x29 },
+ { CCI_REG8(0x461B), 0x12 },
+ { CCI_REG8(0x461F), 0x06 },
+ { CCI_REG8(0x4635), 0x07 },
+ { CCI_REG8(0x4637), 0x30 },
+ { CCI_REG8(0x463F), 0x18 },
+ { CCI_REG8(0x4641), 0x0D },
+ { CCI_REG8(0x465B), 0x12 },
+ { CCI_REG8(0x465F), 0x11 },
+ { CCI_REG8(0x4663), 0x11 },
+ { CCI_REG8(0x4667), 0x0F },
+ { CCI_REG8(0x466F), 0x0F },
+ { CCI_REG8(0x470E), 0x09 },
+ { CCI_REG8(0x4909), 0xAB },
+ { CCI_REG8(0x490B), 0x95 },
+ { CCI_REG8(0x4915), 0x5D },
+ { CCI_REG8(0x4A5F), 0xFF },
+ { CCI_REG8(0x4A61), 0xFF },
+ { CCI_REG8(0x4A73), 0x62 },
+ { CCI_REG8(0x4A85), 0x00 },
+ { CCI_REG8(0x4A87), 0xFF },
/* embedded data */
- {0x5041, 0x04},
- {0x583C, 0x04},
- {0x620E, 0x04},
- {0x6EB2, 0x01},
- {0x6EB3, 0x00},
- {0x9300, 0x02},
+ { IMX214_REG_EBD_SIZE_V, IMX214_EBD_4_LINE },
+ { CCI_REG8(0x583C), 0x04 },
+ { CCI_REG8(0x620E), 0x04 },
+ { CCI_REG8(0x6EB2), 0x01 },
+ { CCI_REG8(0x6EB3), 0x00 },
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* image quality */
/* HDR setting */
- {0x3001, 0x07},
- {0x6D12, 0x3F},
- {0x6D13, 0xFF},
- {0x9344, 0x03},
- {0x9706, 0x10},
- {0x9707, 0x03},
- {0x9708, 0x03},
- {0x9E04, 0x01},
- {0x9E05, 0x00},
- {0x9E0C, 0x01},
- {0x9E0D, 0x02},
- {0x9E24, 0x00},
- {0x9E25, 0x8C},
- {0x9E26, 0x00},
- {0x9E27, 0x94},
- {0x9E28, 0x00},
- {0x9E29, 0x96},
+ { IMX214_REG_RMSC_NR_MODE, 0x07 },
+ { IMX214_REG_RG_STATS_LMT, IMX214_RG_STATS_LMT_14_BIT },
+ { CCI_REG8(0x9344), 0x03 },
+ { CCI_REG8(0x9706), 0x10 },
+ { CCI_REG8(0x9707), 0x03 },
+ { CCI_REG8(0x9708), 0x03 },
+ { CCI_REG8(0x9E04), 0x01 },
+ { CCI_REG8(0x9E05), 0x00 },
+ { CCI_REG8(0x9E0C), 0x01 },
+ { CCI_REG8(0x9E0D), 0x02 },
+ { CCI_REG8(0x9E24), 0x00 },
+ { CCI_REG8(0x9E25), 0x8C },
+ { CCI_REG8(0x9E26), 0x00 },
+ { CCI_REG8(0x9E27), 0x94 },
+ { CCI_REG8(0x9E28), 0x00 },
+ { CCI_REG8(0x9E29), 0x96 },
/* CNR parameter setting */
- {0x69DB, 0x01},
+ { CCI_REG8(0x69DB), 0x01 },
/* Moire reduction */
- {0x6957, 0x01},
+ { CCI_REG8(0x6957), 0x01 },
/* image enhancement */
- {0x6987, 0x17},
- {0x698A, 0x03},
- {0x698B, 0x03},
+ { CCI_REG8(0x6987), 0x17 },
+ { CCI_REG8(0x698A), 0x03 },
+ { CCI_REG8(0x698B), 0x03 },
/* white balance */
- {0x0B8E, 0x01},
- {0x0B8F, 0x00},
- {0x0B90, 0x01},
- {0x0B91, 0x00},
- {0x0B92, 0x01},
- {0x0B93, 0x00},
- {0x0B94, 0x01},
- {0x0B95, 0x00},
+ { IMX214_REG_ABS_GAIN_GREENR, 0x0100 },
+ { IMX214_REG_ABS_GAIN_RED, 0x0100 },
+ { IMX214_REG_ABS_GAIN_BLUE, 0x0100 },
+ { IMX214_REG_ABS_GAIN_GREENB, 0x0100 },
/* ATR setting */
- {0x6E50, 0x00},
- {0x6E51, 0x32},
- {0x9340, 0x00},
- {0x9341, 0x3C},
- {0x9342, 0x03},
- {0x9343, 0xFF},
- {IMX214_TABLE_END, 0x00}
+ { CCI_REG8(0x6E50), 0x00 },
+ { CCI_REG8(0x6E51), 0x32 },
+ { CCI_REG8(0x9340), 0x00 },
+ { CCI_REG8(0x9341), 0x3C },
+ { CCI_REG8(0x9342), 0x03 },
+ { CCI_REG8(0x9343), 0xFF },
};
/*
@@ -427,16 +464,19 @@ static const struct reg_8 mode_table_common[] = {
static const struct imx214_mode {
u32 width;
u32 height;
- const struct reg_8 *reg_table;
+ unsigned int num_of_regs;
+ const struct cci_reg_sequence *reg_table;
} imx214_modes[] = {
{
.width = 4096,
.height = 2304,
+ .num_of_regs = ARRAY_SIZE(mode_4096x2304),
.reg_table = mode_4096x2304,
},
{
.width = 1920,
.height = 1080,
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080),
.reg_table = mode_1920x1080,
},
};
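The mode tables above replace anonymous {addr, val} byte pairs with named cci_reg_sequence entries, where the CCI_REG8()/CCI_REG16() macros encode the access width into the register constant itself so a single write helper can pick the transfer size. A minimal userspace sketch of that encoding idea, assuming a made-up bit layout (the kernel's real CCI encoding differs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define REG_WIDTH_SHIFT	16
#define REG8(addr)	((1u << REG_WIDTH_SHIFT) | (addr))	/* 1-byte access */
#define REG16(addr)	((2u << REG_WIDTH_SHIFT) | (addr))	/* 2-byte access */

struct reg_sequence {
	uint32_t reg;
	uint64_t val;
};

static void demo_write(uint32_t reg, uint64_t val)
{
	unsigned int width = reg >> REG_WIDTH_SHIFT;	/* bytes to transfer */
	uint16_t addr = (uint16_t)(reg & 0xffff);	/* on-chip address */

	printf("write %u byte(s) of 0x%llx to reg 0x%04x\n",
	       width, (unsigned long long)val, addr);
}

int main(void)
{
	static const struct reg_sequence seq[] = {
		{ REG16(0x0342), 5008 },	/* e.g. a 16-bit line length */
		{ REG8(0x0901), 0 },		/* e.g. an 8-bit binning type */
	};
	size_t i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		demo_write(seq[i].reg, seq[i].val);
	return 0;
}

Keeping the width next to the address makes a table like mode_4096x2304[] self-describing: the values are plain integers, and the helper rather than the table author worries about byte order and size.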
@@ -490,6 +530,22 @@ static int __maybe_unused imx214_power_off(struct device *dev)
return 0;
}
+static void imx214_update_pad_format(struct imx214 *imx214,
+ const struct imx214_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->code = IMX214_MBUS_CODE;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+}
+
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -549,52 +605,6 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
#endif
};
-static struct v4l2_mbus_framefmt *
-__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_format(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->fmt;
- default:
- return NULL;
- }
-}
-
-static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct imx214 *imx214 = to_imx214(sd);
-
- mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, sd_state,
- format->pad,
- format->which);
- mutex_unlock(&imx214->mutex);
-
- return 0;
-}
-
-static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_crop(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->crop;
- default:
- return NULL;
- }
-}
-
static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -604,34 +614,20 @@ static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_rect *__crop;
const struct imx214_mode *mode;
- mutex_lock(&imx214->mutex);
-
- __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
- format->which);
-
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
format->format.width,
format->format.height);
- __crop->width = mode->width;
- __crop->height = mode->height;
-
- __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
- format->which);
- __format->width = __crop->width;
- __format->height = __crop->height;
- __format->code = IMX214_MBUS_CODE;
- __format->field = V4L2_FIELD_NONE;
- __format->colorspace = V4L2_COLORSPACE_SRGB;
- __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
- __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- __format->colorspace, __format->ycbcr_enc);
- __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
+ imx214_update_pad_format(imx214, mode, &format->format,
+ format->format.code);
+ __format = v4l2_subdev_state_get_format(sd_state, 0);
- format->format = *__format;
+ *__format = format->format;
- mutex_unlock(&imx214->mutex);
+ __crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ __crop->width = mode->width;
+ __crop->height = mode->height;
return 0;
}
@@ -640,14 +636,9 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct imx214 *imx214 = to_imx214(sd);
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx214->mutex);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
return 0;
case V4L2_SEL_TGT_NATIVE_SIZE:
@@ -687,7 +678,6 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx214 *imx214 = container_of(ctrl->handler,
struct imx214, ctrls);
- u8 vals[2];
int ret;
/*
@@ -699,12 +689,7 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_EXPOSURE:
- vals[1] = ctrl->val;
- vals[0] = ctrl->val >> 8;
- ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
- if (ret < 0)
- dev_err(imx214->dev, "Error %d\n", ret);
- ret = 0;
+ cci_write(imx214->regmap, IMX214_REG_EXPOSURE, ctrl->val, &ret);
break;
default:
@@ -790,76 +775,52 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return 0;
};
-#define MAX_CMD 4
-static int imx214_write_table(struct imx214 *imx214,
- const struct reg_8 table[])
-{
- u8 vals[MAX_CMD];
- int i;
- int ret;
-
- for (; table->addr != IMX214_TABLE_END ; table++) {
- if (table->addr == IMX214_TABLE_WAIT_MS) {
- usleep_range(table->val * 1000,
- table->val * 1000 + 500);
- continue;
- }
-
- for (i = 0; i < MAX_CMD; i++) {
- if (table[i].addr != (table[0].addr + i))
- break;
- vals[i] = table[i].val;
- }
-
- ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
-
- if (ret) {
- dev_err(imx214->dev, "write_table error: %d\n", ret);
- return ret;
- }
-
- table += i - 1;
- }
-
- return 0;
-}
-
static int imx214_start_streaming(struct imx214 *imx214)
{
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
const struct imx214_mode *mode;
int ret;
- mutex_lock(&imx214->mutex);
- ret = imx214_write_table(imx214, mode_table_common);
+ ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
+ ARRAY_SIZE(mode_table_common), NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not send common table %d\n", ret);
- goto error;
+ return ret;
}
- mode = v4l2_find_nearest_size(imx214_modes,
- ARRAY_SIZE(imx214_modes), width, height,
- imx214->fmt.width, imx214->fmt.height);
- ret = imx214_write_table(imx214, mode->reg_table);
+ ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
+ IMX214_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure lanes\n");
+ return ret;
+ }
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes),
+ width, height, fmt->width, fmt->height);
+ ret = cci_multi_reg_write(imx214->regmap, mode->reg_table,
+ mode->num_of_regs, NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not send mode table %d\n", ret);
- goto error;
+ return ret;
}
+
+ usleep_range(10000, 10500);
+
+ cci_write(imx214->regmap, IMX214_REG_TEMP_SENSOR_CONTROL, 0x01, NULL);
+
ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
if (ret < 0) {
dev_err(imx214->dev, "could not sync v4l2 controls\n");
- goto error;
+ return ret;
}
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
- if (ret < 0) {
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STREAMING, NULL);
+ if (ret < 0)
dev_err(imx214->dev, "could not send start table %d\n", ret);
- goto error;
- }
- mutex_unlock(&imx214->mutex);
- return 0;
-
-error:
- mutex_unlock(&imx214->mutex);
return ret;
}
@@ -867,7 +828,8 @@ static int imx214_stop_streaming(struct imx214 *imx214)
{
int ret;
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STANDBY, NULL);
if (ret < 0)
dev_err(imx214->dev, "could not send stop table %d\n", ret);
@@ -877,14 +839,17 @@ static int imx214_stop_streaming(struct imx214 *imx214)
static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct imx214 *imx214 = to_imx214(subdev);
- int ret;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
if (enable) {
ret = pm_runtime_resume_and_get(imx214->dev);
if (ret < 0)
return ret;
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
ret = imx214_start_streaming(imx214);
+ v4l2_subdev_unlock_state(state);
if (ret < 0)
goto err_rpm_put;
} else {
@@ -948,7 +913,7 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
.enum_mbus_code = imx214_enum_mbus_code,
.enum_frame_size = imx214_enum_frame_size,
.enum_frame_interval = imx214_enum_frame_interval,
- .get_fmt = imx214_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx214_set_format,
.get_selection = imx214_get_selection,
.get_frame_interval = imx214_get_frame_interval,
@@ -965,12 +930,6 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
.init_state = imx214_entity_init_state,
};
-static const struct regmap_config sensor_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .cache_type = REGCACHE_MAPLE,
-};
-
static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
{
unsigned int i;
@@ -992,28 +951,42 @@ static int imx214_parse_fwnode(struct device *dev)
int ret;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!endpoint) {
- dev_err(dev, "endpoint node not found\n");
- return -EINVAL;
- }
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
if (ret) {
- dev_err(dev, "parsing endpoint node failed\n");
+ dev_err_probe(dev, ret, "parsing endpoint node failed\n");
+ goto done;
+ }
+
+ /* Check the number of MIPI CSI2 data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "only 4 data lanes are currently supported\n");
goto done;
}
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.nr_of_link_frequencies != 1)
+ dev_warn(dev, "Only one link-frequency is supported, please review your DT. Continuing anyway\n");
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
break;
-
- if (i == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
- IMX214_DEFAULT_LINK_FREQ);
- ret = -EINVAL;
- goto done;
+ if (bus_cfg.link_frequencies[i] ==
+ IMX214_DEFAULT_LINK_FREQ_LEGACY) {
+ dev_warn(dev,
+ "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
+ IMX214_DEFAULT_LINK_FREQ);
+ break;
+ }
}
+ if (i == bus_cfg.nr_of_link_frequencies)
+ ret = dev_err_probe(dev, -EINVAL,
+ "link-frequencies %d not supported, please review your DT\n",
+ IMX214_DEFAULT_LINK_FREQ);
+
done:
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(endpoint);
@@ -1037,34 +1010,28 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
imx214->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx214->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(imx214->xclk);
- }
+ if (IS_ERR(imx214->xclk))
+ return dev_err_probe(dev, PTR_ERR(imx214->xclk),
+ "failed to get xclk\n");
ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
- if (ret) {
- dev_err(dev, "could not set xclk frequency\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set xclk frequency\n");
ret = imx214_get_regulators(dev, imx214);
- if (ret < 0) {
- dev_err(dev, "cannot get regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(imx214->enable_gpio)) {
- dev_err(dev, "cannot get enable gpio\n");
- return PTR_ERR(imx214->enable_gpio);
- }
+ if (IS_ERR(imx214->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(imx214->enable_gpio),
+ "failed to get enable gpio\n");
- imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
- if (IS_ERR(imx214->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(imx214->regmap);
- }
+ imx214->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx214->regmap))
+ return dev_err_probe(dev, PTR_ERR(imx214->regmap),
+ "failed to initialize CCI\n");
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
imx214->sd.internal_ops = &imx214_internal_ops;
@@ -1079,9 +1046,6 @@ static int imx214_probe(struct i2c_client *client)
if (ret < 0)
goto error_power_off;
- mutex_init(&imx214->mutex);
- imx214->ctrls.lock = &imx214->mutex;
-
imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
imx214->sd.dev = &client->dev;
@@ -1089,32 +1053,40 @@ static int imx214_probe(struct i2c_client *client)
ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
if (ret < 0) {
- dev_err(dev, "could not register media entity\n");
+ dev_err_probe(dev, ret, "failed to init entity pads\n");
goto free_ctrl;
}
- imx214_entity_init_state(&imx214->sd, NULL);
+ imx214->sd.state_lock = imx214->ctrls.lock;
+ ret = v4l2_subdev_init_finalize(&imx214->sd);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "subdev init error\n");
+ goto free_entity;
+ }
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
- dev_err(dev, "could not register v4l2 device\n");
- goto free_entity;
+ dev_err_probe(dev, ret,
+ "failed to register sensor sub-device\n");
+ goto error_subdev_cleanup;
}
pm_runtime_idle(imx214->dev);
return 0;
-free_entity:
+error_subdev_cleanup:
pm_runtime_disable(imx214->dev);
pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&imx214->sd);
+
+free_entity:
media_entity_cleanup(&imx214->sd.entity);
free_ctrl:
- mutex_destroy(&imx214->mutex);
v4l2_ctrl_handler_free(&imx214->ctrls);
error_power_off:
@@ -1129,9 +1101,9 @@ static void imx214_remove(struct i2c_client *client)
struct imx214 *imx214 = to_imx214(sd);
v4l2_async_unregister_subdev(&imx214->sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&imx214->sd.entity);
v4l2_ctrl_handler_free(&imx214->ctrls);
- mutex_destroy(&imx214->mutex);
pm_runtime_disable(&client->dev);
if (!pm_runtime_status_suspended(&client->dev)) {
imx214_power_off(imx214->dev);
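A pattern used throughout the imx214 conversion above is cci_write()'s trailing int * argument, which latches the first failure and turns later calls into no-ops so a run of writes needs only one error check. A minimal userspace sketch of that idiom, with demo_write() and do_io() as hypothetical stand-ins for the real helpers:

#include <stdio.h>

/* Pretend bus access that faults for one register, purely for the demo. */
static int do_io(unsigned int reg, unsigned int val)
{
	(void)val;
	return reg == 0x0202 ? -5 : 0;	/* -EIO for register 0x0202 */
}

/* Sketch of the error-accumulation idiom behind cci_write()'s last arg. */
static int demo_write(unsigned int reg, unsigned int val, int *err)
{
	int ret;

	if (err && *err)
		return *err;	/* an earlier write failed: skip the I/O */

	ret = do_io(reg, val);
	if (ret && err)
		*err = ret;	/* latch only the first failure */
	return ret;
}

int main(void)
{
	int ret = 0;

	demo_write(0x0100, 0x00, &ret);
	demo_write(0x0202, 0x0c, &ret);		/* fails and latches ret */
	demo_write(0x0104, 0x01, &ret);		/* skipped because ret != 0 */
	printf("final status: %d\n", ret);	/* one check for the batch */
	return 0;
}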
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 64227eb423d4..dd5a8577d747 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -73,7 +73,7 @@
#define IMX219_REG_VTS CCI_REG16(0x0160)
#define IMX219_VTS_MAX 0xffff
-#define IMX219_VBLANK_MIN 4
+#define IMX219_VBLANK_MIN 32
/* HBLANK control - read only */
#define IMX219_PPL_DEFAULT 3448
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index fcfd1d851bd4..0beb80b8c458 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -559,12 +559,14 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
imx335->vblank,
imx335->vblank + imx335->cur_mode->height);
- return __v4l2_ctrl_modify_range(imx335->exp_ctrl,
- IMX335_EXPOSURE_MIN,
- imx335->vblank +
- imx335->cur_mode->height -
- IMX335_EXPOSURE_OFFSET,
- 1, IMX335_EXPOSURE_DEFAULT);
+ ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
+ IMX335_EXPOSURE_MIN,
+ imx335->vblank +
+ imx335->cur_mode->height -
+ IMX335_EXPOSURE_OFFSET,
+ 1, IMX335_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
}
/*
@@ -575,6 +577,13 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ exposure = imx335->exp_ctrl->val;
+ analog_gain = imx335->again_ctrl->val;
+
+ ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
+
+ break;
case V4L2_CID_EXPOSURE:
exposure = ctrl->val;
analog_gain = imx335->again_ctrl->val;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index 83b49cf114ac..625fbcd39068 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1937,6 +1937,32 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
OV08X40_REG_VALUE_08BIT, OV08X40_MODE_STANDBY);
}
+/* Verify chip ID */
+static int ov08x40_identify_module(struct ov08x40 *ov08x)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ int ret;
+ u32 val;
+
+ if (ov08x->identified)
+ return 0;
+
+ ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
+ OV08X40_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV08X40_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV08X40_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ ov08x->identified = true;
+
+ return 0;
+}
+
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
@@ -1950,6 +1976,10 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_unlock;
+ ret = ov08x40_identify_module(ov08x);
+ if (ret)
+ goto err_rpm_put;
+
/*
* Apply default & customized values
* and then start streaming.
@@ -1974,32 +2004,6 @@ err_unlock:
return ret;
}
-/* Verify chip ID */
-static int ov08x40_identify_module(struct ov08x40 *ov08x)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int ret;
- u32 val;
-
- if (ov08x->identified)
- return 0;
-
- ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
- OV08X40_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- OV08X40_CHIP_ID, val);
- return -ENXIO;
- }
-
- ov08x->identified = true;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov08x40_video_ops = {
.s_stream = ov08x40_set_stream,
};
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 9a5d118b87b0..04e93618f408 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -828,8 +828,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
0, 0, ov2740_test_pattern_menu);
ret = v4l2_fwnode_device_parse(&client->dev, &props);
- if (ret)
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
+ }
v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov2740_ctrl_ops, &props);
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 389582420ba7..048a1a381b33 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -460,24 +460,20 @@ out:
return err;
}
-/* Use MHz as base so the div needs no u64 */
-static u32 tc358746_cfg_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz,
- unsigned int time_base)
+static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz,
+ unsigned long long time_base)
{
- return DIV_ROUND_UP(cfg_val * clk_mhz, time_base);
+ return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base);
}
-static u32 tc358746_ps_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC);
}
-static u32 tc358746_us_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC);
}
static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
@@ -492,7 +488,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
/* The hs_byte_clk is also called SYSCLK in the Excel sheet */
hs_byte_clk = cfg->hs_clk_rate / 8;
- hs_byte_clk /= HZ_PER_MHZ;
hf_clk = hs_byte_clk / 2;
val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1;
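The tc358746 counter helpers above switch from a MHz clock base to full Hz with a 64-bit round-up division; with a picosecond time base the product no longer fits in 32 bits, and rounding up keeps sub-count settings from truncating to zero. A self-contained sketch of the arithmetic, assuming an illustrative 62.5 MHz clock:

#include <stdint.h>
#include <stdio.h>

/* Round-up division of cfg_val * clk_hz by time_base, all in 64 bits. */
static uint32_t cfg_to_cnt(uint64_t cfg_val, uint64_t clk_hz,
			   uint64_t time_base)
{
	return (uint32_t)((cfg_val * clk_hz + time_base - 1) / time_base);
}

int main(void)
{
	uint64_t clk_hz = 62500000;	/* assumed 62.5 MHz for the demo */

	/* 100 us: 100 * 62.5e6 / 1e6 = 6250 counts */
	printf("%u\n", cfg_to_cnt(100, clk_hz, 1000000ULL));
	/* 8000 ps is only half a count; round-up keeps it nonzero */
	printf("%u\n", cfg_to_cnt(8000, clk_hz, 1000000000000ULL));
	return 0;
}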
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 858db5d4ca75..e51f2ed3f031 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -683,11 +683,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
- ret = v4l2_ctrl_handler_setup(&csid->ctrls);
- if (ret < 0) {
- dev_err(csid->camss->dev,
- "could not sync v4l2 controls: %d\n", ret);
- return ret;
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+ if (ret < 0) {
+ dev_err(csid->camss->dev,
+ "could not sync v4l2 controls: %d\n", ret);
+ return ret;
+ }
}
if (!csid->testgen.enabled &&
@@ -761,7 +763,8 @@ static void csid_try_format(struct csid_device *csid,
break;
case MSM_CSID_PAD_SRC:
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, */
/* keep pad formats in sync */
u32 code = fmt->code;
@@ -811,7 +814,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->res->formats->formats[code->index].code;
} else {
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
@@ -1190,7 +1194,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */
/* do not allow a link from CSIPHY to CSID */
- if (csid->testgen_mode->cur.val != 0)
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
+ csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
@@ -1283,24 +1288,27 @@ int msm_csid_register_entity(struct csid_device *csid,
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
- ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
- if (ret < 0) {
- dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
- return ret;
- }
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
+ return ret;
+ }
- csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
- &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
- csid->testgen.nmodes, 0, 0,
- csid->testgen.modes);
+ csid->testgen_mode =
+ v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+ &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ csid->testgen.nmodes, 0, 0,
+ csid->testgen.modes);
- if (csid->ctrls.error) {
- dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
- ret = csid->ctrls.error;
- goto free_ctrl;
- }
+ if (csid->ctrls.error) {
+ dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
+ ret = csid->ctrls.error;
+ goto free_ctrl;
+ }
- csid->subdev.ctrl_handler = &csid->ctrls;
+ csid->subdev.ctrl_handler = &csid->ctrls;
+ }
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
@@ -1331,7 +1339,8 @@ int msm_csid_register_entity(struct csid_device *csid,
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
}
@@ -1344,7 +1353,8 @@ void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
}
inline bool csid_is_lite(struct csid_device *csid)
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 95f6a1ac7eaf..3c5811c6c028 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -400,6 +400,10 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
break;
+ default:
+ WARN(1, "Unsupported HW version: %x\n",
+ vfe->camss->res->version);
+ break;
}
return 0;
}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 7b3a37957e3a..d151d2ed1f64 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -797,13 +797,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
+ of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
- of_node_put(i2c_bus);
ret = -ENODEV;
goto err_node_put;
}
- of_node_put(i2c_bus);
/* Acquire reset GPIO and activate it */
tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
index 48941aae8c9b..0c776e4a7ce8 100644
--- a/drivers/media/platform/st/stm32/stm32-csi.c
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -325,7 +325,6 @@ static const struct stm32_csi_mbps_phy_reg snps_stm32mp25[] = {
{ .mbps = 2400, .hsfreqrange = 0x47, .osc_freq_target = 442 },
{ .mbps = 2450, .hsfreqrange = 0x48, .osc_freq_target = 451 },
{ .mbps = 2500, .hsfreqrange = 0x49, .osc_freq_target = 460 },
- { /* sentinel */ }
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -444,13 +443,13 @@ static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
static int stm32_csi_start(struct stm32_csi_dev *csidev,
struct v4l2_subdev_state *state)
{
- const struct stm32_csi_mbps_phy_reg *phy_regs;
+ const struct stm32_csi_mbps_phy_reg *phy_regs = NULL;
struct v4l2_mbus_framefmt *sink_fmt;
const struct stm32_csi_fmts *fmt;
unsigned long phy_clk_frate;
+ u32 lanes_ie, lanes_en;
unsigned int mbps;
- u32 lanes_ie = 0;
- u32 lanes_en = 0;
+ unsigned int i;
s64 link_freq;
int ret;
u32 ccfr;
@@ -474,11 +473,14 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
mbps = div_s64(link_freq, 500000);
dev_dbg(csidev->dev, "Computed Mbps: %u\n", mbps);
- for (phy_regs = snps_stm32mp25; phy_regs->mbps != 0; phy_regs++)
- if (phy_regs->mbps >= mbps)
+ for (i = 0; i < ARRAY_SIZE(snps_stm32mp25); i++) {
+ if (snps_stm32mp25[i].mbps >= mbps) {
+ phy_regs = &snps_stm32mp25[i];
break;
+ }
+ }
- if (!phy_regs->mbps) {
+ if (!phy_regs) {
dev_err(csidev->dev, "Unsupported PHY speed (%u Mbps)", mbps);
return -ERANGE;
}
@@ -488,8 +490,8 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
phy_regs->osc_freq_target);
/* Prepare lanes related configuration bits */
- lanes_ie |= STM32_CSI_SR1_DL0_ERRORS;
- lanes_en |= STM32_CSI_PCR_DL0EN;
+ lanes_ie = STM32_CSI_SR1_DL0_ERRORS;
+ lanes_en = STM32_CSI_PCR_DL0EN;
if (csidev->num_lanes == 2) {
lanes_ie |= STM32_CSI_SR1_DL1_ERRORS;
lanes_en |= STM32_CSI_PCR_DL1EN;
@@ -497,21 +499,19 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
ret = pm_runtime_get_sync(csidev->dev);
if (ret < 0)
- return ret;
+ goto error_put;
/* Retrieve CSI2PHY clock rate to compute CCFR value */
phy_clk_frate = clk_get_rate(csidev->clks[STM32_CSI_CLK_CSI2PHY].clk);
if (!phy_clk_frate) {
- pm_runtime_put(csidev->dev);
dev_err(csidev->dev, "CSI2PHY clock rate invalid (0)\n");
- return ret;
+ ret = -EINVAL;
+ goto error_put;
}
ret = stm32_csi_setup_lane_merger(csidev);
- if (ret) {
- pm_runtime_put(csidev->dev);
- return ret;
- }
+ if (ret)
+ goto error_put;
/* Enable the CSI */
writel_relaxed(STM32_CSI_CR_CSIEN, csidev->base + STM32_CSI_CR);
@@ -567,6 +567,10 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
writel_relaxed(0, csidev->base + STM32_CSI_PMCR);
return ret;
+
+error_put:
+ pm_runtime_put(csidev->dev);
+ return ret;
}
static void stm32_csi_stop(struct stm32_csi_dev *csidev)
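The stm32-csi hunk above removes the zeroed sentinel row and bounds the PHY-rate lookup with ARRAY_SIZE(), initializing the result to NULL so an out-of-range speed is caught without depending on a terminator record. A minimal sketch of the bounded-scan pattern, with assumed table values:

#include <stddef.h>
#include <stdio.h>

struct rate {
	unsigned int mbps;
	unsigned int cfg;
};

static const struct rate rates[] = {
	{ 1500, 0x40 },
	{ 2000, 0x44 },
	{ 2500, 0x49 },
};

int main(void)
{
	unsigned int mbps = 3000;	/* faster than every table entry */
	const struct rate *found = NULL;
	size_t i;

	/* Bounded scan: walking off the end is impossible by construction. */
	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (rates[i].mbps >= mbps) {
			found = &rates[i];
			break;
		}
	}
	if (found)
		printf("using cfg 0x%x for %u Mbps\n", found->cfg, mbps);
	else
		printf("unsupported speed %u Mbps\n", mbps);
	return 0;
}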
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 669bd96da4c7..273e8ed8c2a9 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -789,9 +789,14 @@ static int vivid_thread_vid_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Capture Thread End\n");
return 0;
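The vivid threads in this and the following three hunks stop spinning in schedule() and instead sleep for cur_jiffies + wait_jiffies - jiffies ticks. That expression stays valid even when the tick counter wraps, because unsigned subtraction is modular; a minimal sketch with a 32-bit counter (an illustrative assumption; kernel jiffies comparisons rely on the same signed-difference trick as time_after()):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is after b", mirroring the time_after() idea. */
static int time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* counter about to wrap */
	uint32_t deadline = now + 0x20;	/* lands past the wrap, at 0x10 */

	printf("deadline still ahead: %d\n", time_after32(deadline, now));
	printf("remaining ticks: %u\n", deadline - now);	/* 32, not ~4e9 */
	return 0;
}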
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
index fac6208b51da..015a7b166a1e 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
@@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Output Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
index fa711ee36a3f..c862689786b6 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
@@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Touch Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index 74a91d28c8be..c633fc2ed664 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "SDR Capture Thread End\n");
return 0;
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index a4a9781328c5..06d61e52f018 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1720,6 +1720,8 @@ static void cx231xx_video_dev_init(
vfd->lock = &dev->lock;
vfd->release = video_device_release_empty;
vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
+ vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 4e58476d305e..4a55cf78ec52 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -862,6 +862,25 @@ static inline void uvc_clear_bit(u8 *data, int bit)
data[bit >> 3] &= ~(1 << (bit & 7));
}
+static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val)
+{
+ unsigned int i;
+
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == val)
+ return i;
+ }
+
+ return val;
+}
+
/*
* Extract the bit string specified by mapping->offset and mapping->size
* from the little-endian data stored at 'data' and return the result as
@@ -896,6 +915,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
value |= -(value & (1 << (mapping->size - 1)));
+ /* If it is a menu, convert from UVC to V4L2. */
+ if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU)
+ return value;
+
+ switch (query) {
+ case UVC_GET_CUR:
+ case UVC_GET_DEF:
+ return uvc_menu_to_v4l2_menu(mapping, value);
+ }
+
return value;
}
@@ -1060,32 +1089,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
- const u8 *data)
-{
- s32 value = mapping->get(mapping, UVC_GET_CUR, data);
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- unsigned int i;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == value) {
- value = i;
- break;
- }
- }
- }
-
- return value;
-}
-
static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
@@ -1136,8 +1139,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
if (ret < 0)
return ret;
- *value = __uvc_ctrl_get_value(mapping,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ *value = mapping->get(mapping, UVC_GET_CUR,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0;
}
@@ -1287,7 +1290,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
{
struct uvc_control_mapping *master_map = NULL;
struct uvc_control *master_ctrl = NULL;
- unsigned int i;
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
v4l2_ctrl->id = mapping->id;
@@ -1330,21 +1332,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
v4l2_ctrl->step = 1;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == v4l2_ctrl->default_value) {
- v4l2_ctrl->default_value = i;
- break;
- }
- }
-
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
@@ -1630,7 +1617,7 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
- s32 value = __uvc_ctrl_get_value(mapping, data);
+ s32 value = mapping->get(mapping, UVC_GET_CUR, data);
/*
* handle may be NULL here if the device sends auto-update
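The uvc_ctrl rework above folds the menu translation into uvc_get_le_value() via uvc_menu_to_v4l2_menu(), which walks a bitmask of valid menu indices and maps a raw device value back to its index. A self-contained sketch of that lookup; the table, mask, and the -1 miss value are illustrative assumptions (the driver returns the raw value unchanged when nothing matches):

#include <stdint.h>
#include <stdio.h>

static const uint32_t menu_values[] = { 1, 2, 4, 8 };	/* index -> value */
static const uint32_t menu_mask = 0xb;	/* indices 0, 1 and 3 are valid */

static int32_t menu_to_index(uint32_t val)
{
	unsigned int i;

	for (i = 0; (1u << i) <= menu_mask; i++) {
		if (!(menu_mask & (1u << i)))
			continue;	/* index not exposed by this mapping */
		if (menu_values[i] == val)
			return (int32_t)i;
	}
	return -1;	/* value 4 exists, but index 2 is masked out */
}

int main(void)
{
	printf("%d %d %d\n", menu_to_index(1), menu_to_index(8),
	       menu_to_index(4));	/* prints: 0 3 -1 */
	return 0;
}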
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 93c6cdb23881..75216507cbdc 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -108,6 +108,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
struct uvc_control_mapping *map;
int ret;
+ if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) {
+ uvc_dbg(chain->dev, CONTROL,
+ "Unsupported UVC data type %u\n", xmap->data_type);
+ return -EINVAL;
+ }
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index cde1774c9098..a3074f469b15 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -444,6 +444,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
+ memset(config, 0, sizeof(*config));
+
return check_pad(sd, pad) ? :
sd->ops->pad->get_mbus_config(sd, pad, config);
}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a9604ba3c805..f0746db92ca6 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2915,14 +2915,14 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
timeout = 10;
break;
- case RESERVE:
+ case RESERVE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
timeout = 10;
break;
- case RELEASE:
+ case RELEASE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index cff56deba24f..e9914e8a29a3 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -224,6 +224,7 @@ static const struct regmap_range axp717_writeable_ranges[] = {
regmap_reg_range(AXP717_VSYS_V_POWEROFF, AXP717_VSYS_V_POWEROFF),
regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN),
regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE),
+ regmap_reg_range(AXP717_TS_PIN_CFG, AXP717_TS_PIN_CFG),
regmap_reg_range(AXP717_ICC_CHG_SET, AXP717_CV_CHG_SET),
regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL),
regmap_reg_range(AXP717_ADC_CH_EN_CONTROL, AXP717_ADC_CH_EN_CONTROL),
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index aa4a9940b569..ae71a2710bed 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -47,6 +47,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
struct regmap_config syscon_config = syscon_regmap_config;
struct resource res;
struct reset_control *reset;
+ resource_size_t res_size;
WARN_ON(!mutex_is_locked(&syscon_list_lock));
@@ -96,6 +97,12 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
}
}
+ res_size = resource_size(&res);
+ if (res_size < reg_io_width) {
+ ret = -EFAULT;
+ goto err_regmap;
+ }
+
syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start);
if (!syscon_config.name) {
ret = -ENOMEM;
@@ -103,7 +110,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
}
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
- syscon_config.max_register = resource_size(&res) - reg_io_width;
+ syscon_config.max_register = res_size - reg_io_width;
if (!syscon_config.max_register)
syscon_config.max_register_is_0 = true;
diff --git a/drivers/mfd/tps65219.c b/drivers/mfd/tps65219.c
index 081c5a30b04a..4aca922658e3 100644
--- a/drivers/mfd/tps65219.c
+++ b/drivers/mfd/tps65219.c
@@ -221,7 +221,6 @@ static const struct regmap_irq_chip tps65219_irq_chip = {
static int tps65219_probe(struct i2c_client *client)
{
struct tps65219 *tps;
- unsigned int chipid;
bool pwr_button;
int ret;
@@ -246,12 +245,6 @@ static int tps65219_probe(struct i2c_client *client)
if (ret)
return ret;
- ret = regmap_read(tps->regmap, TPS65219_REG_TI_DEV_ID, &chipid);
- if (ret) {
- dev_err(tps->dev, "Failed to read device ID: %d\n", ret);
- return ret;
- }
-
ret = devm_mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO,
tps65219_cells, ARRAY_SIZE(tps65219_cells),
NULL, 0, regmap_irq_get_domain(tps->irq_data));
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 89224d4af4a2..e13f9fdd9d7b 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -304,6 +304,10 @@ static int ee1004_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
+ err = i2c_smbus_read_byte(client);
+ if (err < 0)
+ return -ENODEV;
+
mutex_lock(&ee1004_bus_lock);
err = ee1004_init_bus_data(client);
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 5b861dbff27e..6c24426104ba 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -29,6 +29,13 @@ static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
+ * This is a pointer to do_nothing() which is initialized at runtime rather
+ * than build time to avoid objtool IBT validation warnings caused by an
+ * inlined unrolled memcpy() in execute_location().
+ */
+static void __ro_after_init *do_nothing_ptr;
+
+/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
- void *do_nothing_text = dereference_function_descriptor(do_nothing);
- pr_info("attempting ok execution at %px\n", do_nothing_text);
+ pr_info("attempting ok execution at %px\n", do_nothing_ptr);
do_nothing();
if (write == CODE_WRITE) {
- memcpy(dst, do_nothing_text, EXEC_SIZE);
+ memcpy(dst, do_nothing_ptr, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
void __init lkdtm_perms_init(void)
{
+ do_nothing_ptr = dereference_function_descriptor(do_nothing);
+
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index 04756302b878..98d3d123004c 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -37,6 +37,7 @@
struct pci1xxxx_gpio {
struct auxiliary_device *aux_dev;
void __iomem *reg_base;
+ raw_spinlock_t wa_lock;
struct gpio_chip gpio;
spinlock_t lock;
int irq_base;
@@ -167,7 +168,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
- pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
+ writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio));
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -257,6 +258,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
struct pci1xxxx_gpio *priv = dev_id;
struct gpio_chip *gc = &priv->gpio;
unsigned long int_status = 0;
+ unsigned long wa_flags;
unsigned long flags;
u8 pincount;
int bit;
@@ -280,7 +282,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
spin_unlock_irqrestore(&priv->lock, flags);
irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
- handle_nested_irq(irq);
+ raw_spin_lock_irqsave(&priv->wa_lock, wa_flags);
+ generic_handle_irq(irq);
+ raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a5f88ec97df7..bc40b940ae21 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,7 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
/*
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index d6ff9d82ae94..3f9c60b579ae 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
/* required last entry */
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 7be1649b1972..da26a080916c 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -36,20 +36,24 @@
#define VSC_TP_XFER_TIMEOUT_BYTES 700
#define VSC_TP_PACKET_PADDING_SIZE 1
#define VSC_TP_PACKET_SIZE(pkt) \
- (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
+ (sizeof(struct vsc_tp_packet_hdr) + le16_to_cpu((pkt)->hdr.len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
- (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
+ (sizeof(struct vsc_tp_packet_hdr) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
- (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
+ (len + sizeof(struct vsc_tp_packet_hdr) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)
-struct vsc_tp_packet {
+struct vsc_tp_packet_hdr {
__u8 sync;
__u8 cmd;
__le16 len;
__le32 seq;
- __u8 buf[] __counted_by(len);
+};
+
+struct vsc_tp_packet {
+ struct vsc_tp_packet_hdr hdr;
+ __u8 buf[VSC_TP_MAX_XFER_SIZE - sizeof(struct vsc_tp_packet_hdr)];
};
struct vsc_tp {
@@ -67,8 +71,8 @@ struct vsc_tp {
u32 seq;
/* command buffer */
- void *tx_buf;
- void *rx_buf;
+ struct vsc_tp_packet *tx_buf;
+ struct vsc_tp_packet *rx_buf;
atomic_t assert_cnt;
wait_queue_head_t xfer_wait;
@@ -158,12 +162,12 @@ static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
void *ibuf, u16 ilen)
{
- int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
+ int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet_hdr);
int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
- u8 *src, *crc_src, *rx_buf = tp->rx_buf;
+ u8 *src, *crc_src, *rx_buf = (u8 *)tp->rx_buf;
int count_down = VSC_TP_MAX_XFER_COUNT;
u32 recv_crc = 0, crc = ~0;
- struct vsc_tp_packet ack;
+ struct vsc_tp_packet_hdr ack;
u8 *dst = (u8 *)&ack;
bool synced = false;
@@ -280,10 +284,10 @@ int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
guard(mutex)(&tp->mutex);
- pkt->sync = VSC_TP_PACKET_SYNC;
- pkt->cmd = cmd;
- pkt->len = cpu_to_le16(olen);
- pkt->seq = cpu_to_le32(++tp->seq);
+ pkt->hdr.sync = VSC_TP_PACKET_SYNC;
+ pkt->hdr.cmd = cmd;
+ pkt->hdr.len = cpu_to_le16(olen);
+ pkt->hdr.seq = cpu_to_le32(++tp->seq);
memcpy(pkt->buf, obuf, olen);
crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen);
@@ -320,7 +324,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
guard(mutex)(&tp->mutex);
/* rom xfer is big endian */
- cpu_to_be32_array(tp->tx_buf, obuf, words);
+ cpu_to_be32_array((u32 *)tp->tx_buf, obuf, words);
ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
@@ -336,7 +340,7 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
return ret;
if (ibuf)
- be32_to_cpu_array(ibuf, tp->rx_buf, words);
+ be32_to_cpu_array(ibuf, (u32 *)tp->rx_buf, words);
return ret;
}
@@ -490,11 +494,11 @@ static int vsc_tp_probe(struct spi_device *spi)
if (!tp)
return -ENOMEM;
- tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ tp->tx_buf = devm_kzalloc(dev, sizeof(*tp->tx_buf), GFP_KERNEL);
if (!tp->tx_buf)
return -ENOMEM;
- tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
+ tp->rx_buf = devm_kzalloc(dev, sizeof(*tp->rx_buf), GFP_KERNEL);
if (!tp->rx_buf)
return -ENOMEM;
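The refactor above trades the __counted_by flexible array for a header struct plus a fixed worst-case buffer, so tx_buf/rx_buf can be allocated by type while the size macros keep counting only the header plus the run-time payload. A reduced sketch (hypothetical demo_* names; kernel context assumed):

struct demo_hdr {
	__u8	sync;
	__u8	cmd;
	__le16	len;	/* payload length on the wire */
	__le32	seq;
};

struct demo_packet {
	struct demo_hdr	hdr;
	__u8		buf[64];	/* fixed worst-case payload + CRC */
};

/* Wire size follows the run-time length field, never sizeof(buf). */
#define DEMO_PACKET_SIZE(pkt) \
	(sizeof(struct demo_hdr) + le16_to_cpu((pkt)->hdr.len) + 4 /* CRC */)

static_assert(offsetof(struct demo_packet, buf) == sizeof(struct demo_hdr));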
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 4c0f37ad0281..8a7e860c0681 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -295,11 +295,13 @@ static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
struct pci_dev *pdev = test->pdev;
int buf_size;
+ bar_size = pci_resource_len(pdev, barno);
+ if (!bar_size)
+ return -ENODATA;
+
if (!test->bar[barno])
return -ENOMEM;
- bar_size = pci_resource_len(pdev, barno);
-
if (barno == test->test_reg_bar)
bar_size = 0x4;
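Moving the pci_resource_len() check first also changes what a failure reports: a zero-length BAR means the endpoint simply does not implement it (-ENODATA), while a NULL mapping is a host-side setup problem (-ENOMEM). The decision order, as a sketch with a hypothetical helper:

static int demo_check_bar(struct pci_dev *pdev, void __iomem *map, int barno)
{
	if (!pci_resource_len(pdev, barno))
		return -ENODATA;	/* BAR not implemented by the endpoint */
	if (!map)
		return -ENOMEM;		/* BAR exists but was never ioremapped */
	return 0;			/* safe to run the BAR test */
}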
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 53d32d0f2709..e3548408ca39 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -27,6 +27,8 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_EXYNOS7870,
+ DW_MCI_TYPE_EXYNOS7870_SMU,
DW_MCI_TYPE_ARTPEC8,
};
@@ -70,6 +72,12 @@ static struct dw_mci_exynos_compatible {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
}, {
+ .compatible = "samsung,exynos7870-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870,
+ }, {
+ .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU,
+ }, {
.compatible = "axis,artpec8-dw-mshc",
.ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
@@ -85,6 +93,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
@@ -100,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host)
 * set for non-encryption mode at this time.
*/
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
mci_writel(host, MPSBEGIN0, 0);
mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
@@ -126,6 +137,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
+ /* Quirk needed for the Exynos7870 SoC */
+ host->quirks |= DW_MMC_QUIRK_FIFO64_32;
+ }
+
if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
/* Quirk needed for the ARTPEC-8 SoC */
host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
@@ -143,6 +160,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -152,6 +171,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -222,6 +243,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -230,6 +253,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -409,6 +434,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
@@ -422,6 +449,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -429,6 +458,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -443,6 +474,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -453,6 +486,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -632,6 +667,10 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .data = &exynos_drv_data, },
{ .compatible = "axis,artpec8-dw-mshc",
.data = &artpec_drv_data, },
{},
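Each EXYNOS7870 hunk above extends the same four-way ctrl_type comparison that already guards every CLKSEL64 access; a plausible follow-up cleanup would hoist the test into one predicate. A hypothetical helper, not part of the patch:

static bool dw_mci_exynos_has_clksel64(struct dw_mci_exynos_priv_data *priv)
{
	switch (priv->ctrl_type) {
	case DW_MCI_TYPE_EXYNOS7:
	case DW_MCI_TYPE_EXYNOS7_SMU:
	case DW_MCI_TYPE_EXYNOS7870:
	case DW_MCI_TYPE_EXYNOS7870_SMU:
	case DW_MCI_TYPE_ARTPEC8:
		return true;	/* 64-bit layout: use CLKSEL64 */
	default:
		return false;	/* legacy layout: use CLKSEL */
	}
}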
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index f73b84bae0c4..6ebb3d1eeb4d 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -1112,26 +1112,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
- goto eirq;
+ goto edisclk;
}
/* There must be at least one IRQ source */
if (!num_irqs) {
ret = -ENXIO;
- goto eirq;
+ goto edisclk;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
- goto eirq;
+ goto edisclk;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
- goto eirq;
+ goto edisclk;
}
ret = tmio_mmc_host_probe(host);
@@ -1143,8 +1143,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
return ret;
-eirq:
- tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
efree:
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e3d39311fdc7..3fd898647237 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1f0bd723f011..13a84b9309e0 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -610,8 +610,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
- if (mode == MMC_POWER_OFF)
+ if (mode == MMC_POWER_OFF) {
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
+ usleep_range(15000, 17500);
return;
+ }
/*
* Bus power might not enable after D3 -> D0 transition due to the
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f4a7733a8ad2..5f91b44891f9 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2065,10 +2065,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_CARD_EN)
+ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
+ SDHCI_CLOCK_CONTROL);
- if (clock == 0)
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
return;
+ }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_enable_clk(host, clk);
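The reordering above stops the card clock before the divider is touched and only clears the whole register once it is known the clock should stay off, so the card never sees a partially programmed divider. The intended sequence, condensed into a sketch:

static void demo_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);

	/* 1. Gate the card clock so the card never sees divider glitches. */
	if (clk & SDHCI_CLOCK_CARD_EN)
		sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
			     SDHCI_CLOCK_CONTROL);

	/* 2. A zero request means clock fully off: clear the register. */
	if (clock == 0) {
		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
		return;
	}

	/* 3. Program the new divider and re-enable internal + card clock. */
	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}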
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index f75c31815ab0..73385ff4c0f3 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -155,6 +155,7 @@ struct sdhci_am654_data {
u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
+#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
};
struct window {
@@ -166,6 +167,7 @@ struct window {
struct sdhci_am654_driver_data {
const struct sdhci_pltfm_data *pdata;
u32 flags;
+ u32 quirks;
#define IOMUX_PRESENT (1 << 0)
#define FREQSEL_2_BIT (1 << 1)
#define STRBSEL_4_BIT (1 << 2)
@@ -356,6 +358,29 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
+static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ int ret;
+
+ if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
+ ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (ret < 0) {
+ pr_err("%s: Switching to 1.8V signalling voltage failed,\n",
+ mmc_hostname(mmc));
+ return -EIO;
+ }
+ }
+ return 0;
+ }
+
+ return sdhci_start_signal_voltage_switch(mmc, ios);
+}
+
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -650,6 +675,12 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.flags = IOMUX_PRESENT,
};
+static const struct sdhci_am654_driver_data sdhci_am62_4bit_drvdata = {
+ .pdata = &sdhci_j721e_4bit_pdata,
+ .flags = IOMUX_PRESENT,
+ .quirks = SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA,
+};
+
static const struct soc_device_attribute sdhci_am654_devices[] = {
{ .family = "AM65X",
.revision = "SR1.0",
@@ -872,7 +903,7 @@ static const struct of_device_id sdhci_am654_of_match[] = {
},
{
.compatible = "ti,am62-sdhci",
- .data = &sdhci_j721e_4bit_drvdata,
+ .data = &sdhci_am62_4bit_drvdata,
},
{ /* sentinel */ }
};
@@ -906,6 +937,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
sdhci_am654->flags = drvdata->flags;
+ sdhci_am654->quirks = drvdata->quirks;
clk_xin = devm_clk_get(dev, "clk_xin");
if (IS_ERR(clk_xin)) {
@@ -940,6 +972,7 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
+ host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
pm_runtime_get_noresume(dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4da5fcb7def4..203d3467dcbc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2551,7 +2551,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 399844809bbe..bb6071a758f3 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -324,7 +324,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
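of_property_read_bool() is meant for value-less boolean properties and complains when handed one that carries data; "syscon-raminit" holds a phandle plus arguments, so only presence should be tested. A sketch of the distinction (hypothetical helper; the property value shown is illustrative):

/*
 * feature-enable;                   <- boolean, use of_property_read_bool()
 * syscon-raminit = <&scm 0x3 0>;    <- carries a value, test presence only
 */
static bool demo_has_raminit(const struct device_node *np)
{
	return of_property_present(np, "syscon-raminit");
}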
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index fa04a7ced02b..efc40125593d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/timer.h>
+#include <net/netdev_queues.h>
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
@@ -410,10 +411,13 @@ struct kvaser_pciefd_can {
void __iomem *reg_base;
struct can_berr_counter bec;
u8 cmd_seq;
+ u8 tx_max_count;
+ u8 tx_idx;
+ u8 ack_idx;
int err_rep_cnt;
- int echo_idx;
+ unsigned int completed_tx_pkts;
+ unsigned int completed_tx_bytes;
spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
- spinlock_t echo_lock; /* Locks the message echo buffer */
struct timer_list bec_poll_timer;
struct completion start_comp, flush_comp;
};
@@ -714,6 +718,9 @@ static int kvaser_pciefd_open(struct net_device *netdev)
int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ can->tx_idx = 0;
+ can->ack_idx = 0;
+
ret = open_candev(netdev);
if (ret)
return ret;
@@ -745,21 +752,26 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
del_timer(&can->bec_poll_timer);
}
can->can.state = CAN_STATE_STOPPED;
+ netdev_reset_queue(netdev);
close_candev(netdev);
return ret;
}
+static unsigned int kvaser_pciefd_tx_avail(const struct kvaser_pciefd_can *can)
+{
+ return can->tx_max_count - (READ_ONCE(can->tx_idx) - READ_ONCE(can->ack_idx));
+}
+
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
- struct kvaser_pciefd_can *can,
+ struct can_priv *can, u8 seq,
struct sk_buff *skb)
{
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
int packet_size;
- int seq = can->echo_idx;
memset(p, 0, sizeof(*p));
- if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ if (can->ctrlmode & CAN_CTRLMODE_ONE_SHOT)
p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
if (cf->can_id & CAN_RTR_FLAG)
@@ -782,7 +794,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
} else {
p->header[1] |=
FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK,
- can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode));
+ can_get_cc_dlc((struct can_frame *)cf, can->ctrlmode));
}
p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq);
@@ -797,22 +809,24 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- unsigned long irq_flags;
struct kvaser_pciefd_tx_packet packet;
+ unsigned int seq = can->tx_idx & (can->can.echo_skb_max - 1);
+ unsigned int frame_len;
int nr_words;
- u8 count;
if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
+ if (!netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1))
+ return NETDEV_TX_BUSY;
- nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+ nr_words = kvaser_pciefd_prepare_tx_packet(&packet, &can->can, seq, skb);
- spin_lock_irqsave(&can->echo_lock, irq_flags);
/* Prepare and save echo skb in internal slot */
- can_put_echo_skb(skb, netdev, can->echo_idx, 0);
-
- /* Move echo index to the next slot */
- can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+ WRITE_ONCE(can->can.echo_skb[seq], NULL);
+ frame_len = can_skb_get_frame_len(skb);
+ can_put_echo_skb(skb, netdev, seq, frame_len);
+ netdev_sent_queue(netdev, frame_len);
+ WRITE_ONCE(can->tx_idx, can->tx_idx + 1);
/* Write header to fifo */
iowrite32(packet.header[0],
@@ -836,14 +850,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
}
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
- /* No room for a new message, stop the queue until at least one
- * successful transmit
- */
- if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx])
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+ netif_subqueue_maybe_stop(netdev, 0, kvaser_pciefd_tx_avail(can), 1, 1);
return NETDEV_TX_OK;
}
@@ -970,6 +977,8 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can->kv_pcie = pcie;
can->cmd_seq = 0;
can->err_rep_cnt = 0;
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
can->bec.txerr = 0;
can->bec.rxerr = 0;
@@ -983,11 +992,10 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
tx_nr_packets_max =
FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK,
ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ can->tx_max_count = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
can->can.clock.freq = pcie->freq;
- can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1);
- can->echo_idx = 0;
- spin_lock_init(&can->echo_lock);
+ can->can.echo_skb_max = roundup_pow_of_two(can->tx_max_count);
spin_lock_init(&can->lock);
can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
@@ -1201,7 +1209,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_canfd_skb(priv->dev, &cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
cf->len = can_fd_dlc2len(dlc);
@@ -1213,7 +1221,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
if (!skb) {
priv->dev->stats.rx_dropped++;
- return -ENOMEM;
+ return 0;
}
can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode);
}
@@ -1231,7 +1239,9 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
priv->dev->stats.rx_packets++;
kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- return netif_rx(skb);
+ netif_rx(skb);
+
+ return 0;
}
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
@@ -1510,19 +1520,21 @@ static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
netdev_dbg(can->can.dev, "Packet was flushed\n");
} else {
int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]);
- int len;
- u8 count;
+ unsigned int len, frame_len = 0;
struct sk_buff *skb;
+ if (echo_idx != (can->ack_idx & (can->can.echo_skb_max - 1)))
+ return 0;
skb = can->can.echo_skb[echo_idx];
- if (skb)
- kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
- len = can_get_echo_skb(can->can.dev, echo_idx, NULL);
- count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK,
- ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG));
+ if (!skb)
+ return 0;
+ kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp);
+ len = can_get_echo_skb(can->can.dev, echo_idx, &frame_len);
- if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev))
- netif_wake_queue(can->can.dev);
+ /* Pairs with barrier in kvaser_pciefd_start_xmit() */
+ smp_store_release(&can->ack_idx, can->ack_idx + 1);
+ can->completed_tx_pkts++;
+ can->completed_tx_bytes += frame_len;
if (!one_shot_fail) {
can->can.dev->stats.tx_bytes += len;
@@ -1638,32 +1650,51 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
int pos = 0;
int res = 0;
+ unsigned int i;
do {
res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
} while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+ /* Report ACKs in this buffer to BQL en masse for correct periods */
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
+
+ if (!can->completed_tx_pkts)
+ continue;
+ netif_subqueue_completed_wake(can->can.dev, 0,
+ can->completed_tx_pkts,
+ can->completed_tx_bytes,
+ kvaser_pciefd_tx_avail(can), 1);
+ can->completed_tx_pkts = 0;
+ can->completed_tx_bytes = 0;
+ }
+
return res;
}
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
+ void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
+ iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
kvaser_pciefd_read_buffer(pcie, 0);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+ }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
kvaser_pciefd_read_buffer(pcie, 1);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+ }
if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
-
- iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,29 +1722,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
- u32 srb_irq = 0;
- u32 srb_release = 0;
int i;
if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
+ iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
if (pci_irq & irq_mask->kcan_rx0)
- srb_irq = kvaser_pciefd_receive_irq(pcie);
+ kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
- if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
- srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
- if (srb_release)
- iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
return IRQ_HANDLED;
}
@@ -1733,13 +1757,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
}
}
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+ unsigned int i;
+
+ /* Masking PCI_IRQ is insufficient as a running ISR will unmask it */
+ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+ for (i = 0; i < pcie->nr_channels; ++i)
+ iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
- void __iomem *irq_en_base;
pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -1805,8 +1838,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
/* Enable PCI interrupts */
- irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
- iowrite32(irq_mask->all, irq_en_base);
+ iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
/* Ready the DMA buffers */
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1820,8 +1852,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
- /* Disable PCI interrupts */
- iowrite32(0, irq_en_base);
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
err_pci_free_irq_vectors:
@@ -1844,35 +1875,26 @@ err_disable_pci:
return ret;
}
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
- int i;
-
- for (i = 0; i < pcie->nr_channels; i++) {
- struct kvaser_pciefd_can *can = pcie->can[i];
-
- if (can) {
- iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
- unregister_candev(can->can.dev);
- del_timer(&can->bec_poll_timer);
- kvaser_pciefd_pwm_stop(can);
- free_candev(can->can.dev);
- }
- }
-}
-
static void kvaser_pciefd_remove(struct pci_dev *pdev)
{
struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+ unsigned int i;
- kvaser_pciefd_remove_all_ctrls(pcie);
+ for (i = 0; i < pcie->nr_channels; ++i) {
+ struct kvaser_pciefd_can *can = pcie->can[i];
- /* Disable interrupts */
- iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
- iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+ unregister_candev(can->can.dev);
+ del_timer(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ }
+ kvaser_pciefd_disable_irq_srcs(pcie);
free_irq(pcie->pci->irq, pcie);
pci_free_irq_vectors(pcie->pci);
+
+ for (i = 0; i < pcie->nr_channels; ++i)
+ free_candev(pcie->can[i]->can.dev);
+
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
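The new TX path is a lock-free producer/consumer ring: start_xmit() advances tx_idx, the ACK handler advances ack_idx, and the echo slot is tx_idx masked by echo_skb_max, which is rounded up to a power of two so slots cannot alias while at most tx_max_count frames are in flight. A model of the arithmetic with hypothetical demo_* names (the u8 assignment makes the modulo-256 wraparound explicit):

struct demo_ring {
	u8 tx_idx;	/* producer: incremented in start_xmit() */
	u8 ack_idx;	/* consumer: incremented on TX ACK */
	u8 max_count;	/* in-flight limit, <= number of echo slots */
};

static unsigned int demo_tx_avail(const struct demo_ring *r)
{
	/* Difference is taken modulo 256, so it survives index wrap. */
	u8 in_flight = READ_ONCE(r->tx_idx) - READ_ONCE(r->ack_idx);

	return r->max_count - in_flight;
}

static unsigned int demo_echo_slot(const struct demo_ring *r,
				   unsigned int n_slots_pow2)
{
	/* Valid because 256 is a multiple of any power of two <= 256. */
	return r->tx_idx & (n_slots_pow2 - 1);
}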
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d025d4163fd1..39ad4442cb81 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2379,6 +2379,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
SET_NETDEV_DEV(net_dev, dev);
m_can_of_parse_mram(class_dev, mram_config_vals);
+ spin_lock_init(&class_dev->tx_handling_spinlock);
out:
return class_dev;
}
@@ -2463,9 +2464,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
+ unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
- unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index 7107a37da36c..c3fb3176ce42 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -937,8 +937,8 @@ static void rkcanfd_remove(struct platform_device *pdev)
struct rkcanfd_priv *priv = platform_get_drvdata(pdev);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
rkcanfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
free_candev(ndev);
}
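This hunk and the m_can one above fix the same ordering bug: rx-offload state must outlive the netdev registration, because frames can still be delivered until unregister_candev() returns. Teardown should mirror bring-up in reverse, as in this sketch (demo_priv is hypothetical; the calls are the real can-dev API):

struct demo_priv {
	struct net_device	*ndev;
	struct can_rx_offload	offload;
};

static void demo_remove(struct demo_priv *priv)
{
	unregister_candev(priv->ndev);		/* no more RX/TX after this */
	can_rx_offload_del(&priv->offload);	/* now safe to free offload */
	free_candev(priv->ndev);
}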
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index 24c6622d36bd..58ff2ec1d975 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
#define SLCAN_CMD_LEN 1
#define SLCAN_SFF_ID_LEN 3
#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_DATA_LENGTH_LEN 1
+#define SLCAN_ERROR_LEN 1
#define SLCAN_STATE_LEN 1
#define SLCAN_STATE_BE_RXCNT_LEN 3
#define SLCAN_STATE_BE_TXCNT_LEN 3
-#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
- SLCAN_STATE_BE_RXCNT_LEN + \
- SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \
+ SLCAN_STATE_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_ERROR_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
+#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \
+ SLCAN_SFF_ID_LEN + \
+ SLCAN_DATA_LENGTH_LEN)
struct slcan {
struct can_priv can;
@@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
+ if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
+ return;
+
skb = alloc_can_skb(sl->dev, &cf);
if (unlikely(!skb)) {
sl->dev->stats.rx_dropped++;
@@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl)
return;
}
- if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN)
return;
cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
@@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl)
bool rx_errors = false, tx_errors = false, rx_over_errors = false;
int i, len;
+ if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN)
+ return;
+
/* get len from sanitized ASCII value */
len = cmd[1];
if (len >= '0' && len < '9')
@@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl)
static void slcan_unesc(struct slcan *sl, unsigned char s)
{
if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 4)
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags))
slcan_bump(sl);
sl->rcount = 0;
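The minimum-length checks above matter because slcan_unesc() no longer enforces an rcount > 4 floor before dispatching: each decoder must now reject messages too short for the offsets it dereferences. The guard pattern, as a sketch:

static void demo_bump_frame(struct slcan *sl)
{
	/* Shortest valid frame: command + SFF ID + data length digit. */
	if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN)
		return;

	/* All fixed offsets below SLCAN_FRAME_MSG_LEN_MIN now exist. */
	/* ... parse sl->rbuff ... */
}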
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 3bc56517fe7a..c30b04f8fc0d 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
+/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
+ * [-64,63] for TDCO, indicating a relative TDCO.
+ *
+ * Manual tests have shown that using a relative TDCO configuration
+ * results in bus off, while an absolute configuration works.
+ *
+ * For TDCO, use the maximum value (63) from the datasheet, but 0 as the
+ * minimum.
+ */
+static const struct can_tdc_const mcp251xfd_tdc_const = {
+ .tdcv_min = 0,
+ .tdcv_max = 63,
+ .tdco_min = 0,
+ .tdco_max = 63,
+ .tdcf_min = 0,
+ .tdcf_max = 0,
+};
+
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u32 val = 0;
- s8 tdco;
+ u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
- -64, 63);
- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
+ else
+ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
+
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
@@ -2174,8 +2198,8 @@ static void mcp251xfd_remove(struct spi_device *spi)
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
- can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
+ can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
}
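With tdc_const registered, the CAN netlink layer validates user-supplied TDC values and sets at most one of CAN_CTRLMODE_TDC_AUTO or CAN_CTRLMODE_TDC_MANUAL; the driver only has to translate that into the TDCMOD field. The mapping, isolated into a sketch:

static u32 demo_tdcmod(u32 ctrlmode)
{
	if (ctrlmode & CAN_CTRLMODE_TDC_AUTO)
		return MCP251XFD_REG_TDC_TDCMOD_AUTO;	/* chip measures delay */
	if (ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
		return MCP251XFD_REG_TDC_TDCMOD_MANUAL;	/* user supplies tdcv */
	return MCP251XFD_REG_TDC_TDCMOD_DISABLED;
}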
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3b49e87e8ef7..10484d19eaac 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -326,6 +326,26 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
}
}
+static void b53_set_eap_mode(struct b53_device *dev, int port, int mode)
+{
+ u64 eap_conf;
+
+ if (is5325(dev) || is5365(dev) || dev->chip_id == BCM5389_DEVICE_ID)
+ return;
+
+ b53_read64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), &eap_conf);
+
+ if (is63xx(dev)) {
+ eap_conf &= ~EAP_MODE_MASK_63XX;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT_63XX;
+ } else {
+ eap_conf &= ~EAP_MODE_MASK;
+ eap_conf |= (u64)mode << EAP_MODE_SHIFT;
+ }
+
+ b53_write64(dev, B53_EAP_PAGE, B53_PORT_EAP_CONF(port), eap_conf);
+}
+
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
u8 mgmt;
@@ -373,15 +393,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
+ vc1 &= ~VC1_RX_MCST_FWD_EN;
+
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -393,7 +415,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -576,6 +598,25 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
}
+int b53_setup_port(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ b53_port_set_ucast_flood(dev, port, true);
+ b53_port_set_mcast_flood(dev, port, true);
+ b53_port_set_learning(dev, port, false);
+
+ /* Force all traffic to go to the CPU port to prevent the ASIC from
+ * trying to forward to bridged ports on matching FDB entries, then
+ * dropping frames because it isn't allowed to forward there.
+ */
+ if (dsa_is_user_port(ds, port))
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_setup_port);
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -588,10 +629,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
-
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
@@ -722,10 +759,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
b53_brcm_hdr_setup(dev->ds, port);
-
- b53_port_set_ucast_flood(dev, port, true);
- b53_port_set_mcast_flood(dev, port, true);
- b53_port_set_learning(dev, port, false);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -761,6 +794,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
}
+static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (!dev->vlan_filtering)
+ return true;
+
+ dp = dsa_to_port(ds, port);
+
+ if (dsa_port_is_cpu(dp))
+ return true;
+
+ return dp->bridge == NULL;
+}
+
int b53_configure_vlan(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -779,7 +828,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
+ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
/* Create an untagged VLAN entry for the default PVID in case
* CONFIG_VLAN_8021Q is disabled and there are no calls to
@@ -787,26 +836,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
* entry. Do this only when the tagging protocol is not
* DSA_TAG_PROTO_NONE
*/
+ v = &dev->vlans[def_vid];
b53_for_each_port(dev, i) {
- v = &dev->vlans[def_vid];
- v->members |= BIT(i);
+ if (!b53_vlan_port_may_join_untagged(ds, i))
+ continue;
+
+ vl.members |= BIT(i);
if (!b53_vlan_port_needs_forced_tagged(ds, i))
- v->untag = v->members;
- b53_write16(dev, B53_VLAN_PAGE,
- B53_VLAN_PORT_DEF_TAG(i), def_vid);
+ vl.untag = vl.members;
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
+ def_vid);
}
+ b53_set_vlan_entry(dev, def_vid, &vl);
- /* Upon initial call we have not set-up any VLANs, but upon
- * system resume, we need to restore all VLAN entries.
- */
- for (vid = def_vid; vid < dev->num_vlans; vid++) {
- v = &dev->vlans[vid];
+ if (dev->vlan_filtering) {
+ /* Upon initial call we have not set-up any VLANs, but upon
+ * system resume, we need to restore all VLAN entries.
+ */
+ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
+ v = &dev->vlans[vid];
- if (!v->members)
- continue;
+ if (!v->members)
+ continue;
- b53_set_vlan_entry(dev, vid, v);
- b53_fast_age_vlan(dev, vid);
+ b53_set_vlan_entry(dev, vid, v);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_for_each_port(dev, i) {
+ if (!dsa_is_cpu_port(ds, i))
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i),
+ dev->ports[i].pvid);
+ }
}
return 0;
@@ -1125,7 +1187,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
unsigned int port;
+ u16 pvid;
int ret;
/* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
@@ -1133,12 +1197,26 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+ /* The switch does not tell us the original VLAN for untagged
+ * packets, so keep the CPU port always tagged.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
return ret;
}
+ /* setup default vlan for filtering mode */
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+ b53_for_each_port(dev, port) {
+ vl->members |= BIT(port);
+ if (!b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+ }
+
b53_reset_mib(dev);
ret = b53_apply_config(dev);
@@ -1492,7 +1570,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
{
struct b53_device *dev = ds->priv;
- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
+ if (dev->vlan_filtering != vlan_filtering) {
+ dev->vlan_filtering = vlan_filtering;
+ b53_apply_config(dev);
+ }
return 0;
}
@@ -1517,7 +1598,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid >= dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
+ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
return 0;
}
@@ -1530,18 +1611,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
+ u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
- vl = &dev->vlans[vlan->vid];
+ if (vlan->vid == 0)
+ return 0;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
+ dev->ports[port].pvid = new_pvid;
- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
- untagged = true;
+ vl = &dev->vlans[vlan->vid];
+
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
vl->members |= BIT(port);
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
@@ -1549,13 +1641,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
else
vl->untag &= ~BIT(port);
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
- if (pvid && !dsa_is_cpu_port(ds, port)) {
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- vlan->vid);
- b53_fast_age_vlan(dev, vlan->vid);
+ new_pvid);
+ b53_fast_age_vlan(dev, old_pvid);
}
return 0;
@@ -1570,20 +1665,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
struct b53_vlan *vl;
u16 pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+ if (vlan->vid == 0)
+ return 0;
- vl = &dev->vlans[vlan->vid];
+ pvid = dev->ports[port].pvid;
- b53_get_vlan_entry(dev, vlan->vid, vl);
+ vl = &dev->vlans[vlan->vid];
vl->members &= ~BIT(port);
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
+ dev->ports[port].pvid = pvid;
if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
+ if (!dev->vlan_filtering)
+ return 0;
+
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
@@ -1916,8 +2016,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- u16 pvlan, reg;
+ u16 pvlan, reg, pvid;
unsigned int i;
/* On 7278, port 7 which connects to the ASP should only receive
@@ -1926,15 +2027,29 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
return -EINVAL;
- /* Make this port leave the all VLANs join since we will have proper
- * VLAN entries from now on
- */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg &= ~BIT(port);
- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
- reg &= ~BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port leave the all VLANs join since we will have
+ * proper VLAN entries from now on
+ */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ &reg);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
+ reg);
+ }
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ if (vl->members == BIT(cpu_port))
+ vl->members &= ~BIT(cpu_port);
+ vl->untag = vl->members;
+ b53_set_vlan_entry(dev, pvid, vl);
}
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
@@ -1954,6 +2069,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
pvlan |= BIT(i);
}
+ /* Disable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_BASIC);
+
/* Configure the local port VLAN control membership to include
* remote ports and update the local port bitmask
*/
@@ -1967,7 +2085,7 @@ EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
- struct b53_vlan *vl = &dev->vlans[0];
+ struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1989,19 +2107,25 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
pvlan &= ~BIT(i);
}
+ /* Enable redirection of unknown SA to the CPU port */
+ b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+ if (dev->vlan_filtering) {
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
- /* Make this port join all VLANs without VLAN entries */
- if (is58xx(dev)) {
- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
- reg |= BIT(port);
- if (!(reg & BIT(cpu_port)))
- reg |= BIT(cpu_port);
- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
- } else {
b53_get_vlan_entry(dev, pvid, vl);
vl->members |= BIT(port) | BIT(cpu_port);
vl->untag |= BIT(port) | BIT(cpu_port);
@@ -2300,6 +2424,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.phylink_get_caps = b53_phylink_get_caps,
+ .port_setup = b53_setup_port,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.support_eee = b53_support_eee,
@@ -2744,6 +2869,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
ds->ops = &b53_switch_ops;
ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
+ dev->vlan_filtering = false;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
* would break filtering semantics for any of the other bridge
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 9e9b5bc0c5d6..cc86aa777df5 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -95,6 +95,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
+ u16 pvid;
struct ethtool_keee eee;
};
@@ -146,6 +147,7 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
+ bool vlan_filtering;
unsigned int num_ports;
struct b53_port *ports;
@@ -380,6 +382,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_setup_port(struct dsa_switch *ds, int port);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index bfbcb66bef66..5f7a0e5c5709 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -50,6 +50,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EAP Registers */
+#define B53_EAP_PAGE 0x42
+
/* EEE Control Registers Page */
#define B53_EEE_PAGE 0x92
@@ -481,6 +484,17 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EAP Page Registers
+ *************************************************************************/
+#define B53_PORT_EAP_CONF(i) (0x20 + 8 * (i))
+#define EAP_MODE_SHIFT 51
+#define EAP_MODE_SHIFT_63XX 50
+#define EAP_MODE_MASK (0x3ull << EAP_MODE_SHIFT)
+#define EAP_MODE_MASK_63XX (0x3ull << EAP_MODE_SHIFT_63XX)
+#define EAP_MODE_BASIC 0
+#define EAP_MODE_SIMPLIFIED 3
+
+/*************************************************************************
* EEE Configuration Page Registers
*************************************************************************/
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index fa2bf3fa9019..454a8c7fd7ee 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.resume = bcm_sf2_sw_resume,
.get_wol = bcm_sf2_sw_get_wol,
.set_wol = bcm_sf2_sw_set_wol,
+ .port_setup = b53_setup_port,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
.support_eee = b53_support_eee,
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 89f0796894af..f95a9aac56ee 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -265,16 +265,70 @@ static void ksz_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface);
+/**
+ * ksz_phylink_mac_disable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ *
+ * This function is a dummy handler. See ksz_phylink_mac_enable_tx_lpi() for
+ * a detailed explanation of EEE/LPI handling in KSZ switches.
+ */
+static void ksz_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+}
+
+/**
+ * ksz_phylink_mac_enable_tx_lpi() - Callback to signal LPI support (Dummy)
+ * @config: phylink config structure
+ * @timer: timer value before entering LPI (unused)
+ * @tx_clock_stop: whether to stop the TX clock in LPI mode (unused)
+ *
+ * This function signals to phylink that the driver architecture supports
+ * LPI management, enabling phylink to control EEE advertisement during
+ * negotiation according to IEEE Std 802.3 (Clause 78).
+ *
+ * Hardware Management of EEE/LPI State:
+ * For KSZ switch ports with integrated PHYs (e.g., KSZ9893R ports 1-2),
+ * observation and testing suggest that the actual EEE / Low Power Idle (LPI)
+ * state transitions are managed autonomously by the hardware based on
+ * the auto-negotiation results. (Note: While the datasheet describes EEE
+ * operation based on negotiation, it doesn't explicitly detail the internal
+ * MAC/PHY interaction, so autonomous hardware management of the MAC state
+ * for LPI is inferred from observed behavior).
+ * This hardware control, consistent with the switch's ability to operate
+ * autonomously via strapping, means MAC-level software intervention is not
+ * required or exposed for managing the LPI state once EEE is negotiated.
+ * (Ref: KSZ9893R Data Sheet DS00002420D, primarily Section 4.7.5 explaining
+ * EEE, also Sections 4.1.7 on Auto-Negotiation and 3.2.1 on Configuration
+ * Straps).
+ *
+ * Additionally, ports configured as MAC interfaces (e.g., KSZ9893R port 3)
+ * lack documented MAC-level LPI control.
+ *
+ * Therefore, this callback performs no action and serves primarily to inform
+ * phylink of LPI awareness and to document the inferred hardware behavior.
+ *
+ * Returns: 0 (always succeeds)
+ */
+static int ksz_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ return 0;
+}
+
static const struct phylink_mac_ops ksz88x3_phylink_mac_ops = {
.mac_config = ksz88x3_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz8_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops ksz88xx_dev_ops = {
@@ -358,6 +412,8 @@ static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops ksz9477_dev_ops = {
@@ -401,6 +457,8 @@ static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
.mac_config = ksz_phylink_mac_config,
.mac_link_down = ksz_phylink_mac_link_down,
.mac_link_up = ksz9477_phylink_mac_link_up,
+ .mac_disable_tx_lpi = ksz_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = ksz_phylink_mac_enable_tx_lpi,
};
static const struct ksz_dev_ops lan937x_dev_ops = {
@@ -2016,6 +2074,18 @@ static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
if (dev->dev_ops->get_caps)
dev->dev_ops->get_caps(dev, port, config);
+
+ if (ds->ops->support_eee && ds->ops->support_eee(ds, port)) {
+ memcpy(config->lpi_interfaces, config->supported_interfaces,
+ sizeof(config->lpi_interfaces));
+
+ config->lpi_capabilities = MAC_100FD;
+ if (dev->info->gbit_capable[port])
+ config->lpi_capabilities |= MAC_1000FD;
+
+ /* EEE is fully operational */
+ config->eee_enabled_default = true;
+ }
}
void ksz_r_mib_stats64(struct ksz_device *dev, int port)
@@ -3008,31 +3078,6 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
- case KSZ8567_CHIP_ID:
- /* KSZ8567R Errata DS80000752C Module 4 */
- case KSZ8765_CHIP_ID:
- case KSZ8794_CHIP_ID:
- case KSZ8795_CHIP_ID:
- /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
- case KSZ9477_CHIP_ID:
- /* KSZ9477S Errata DS80000754A Module 4 */
- case KSZ9567_CHIP_ID:
- /* KSZ9567S Errata DS80000756A Module 4 */
- case KSZ9896_CHIP_ID:
- /* KSZ9896C Errata DS80000757A Module 3 */
- case KSZ9897_CHIP_ID:
- case LAN9646_CHIP_ID:
- /* KSZ9897R Errata DS80000758C Module 4 */
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled
- * The EEE feature is enabled by default, but it is not fully
- * operational. It must be manually disabled through register
- * controls. If not disabled, the PHY ports can auto-negotiate
- * to enable EEE, and this feature can cause link drops when
- * linked to another device supporting EEE.
- *
- * The same item appears in the errata for all switches above.
- */
- return MICREL_NO_EEE;
}
return 0;
@@ -3466,6 +3511,20 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
+/**
+ * ksz_support_eee - Determine Energy Efficient Ethernet (EEE) support for a
+ * port
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number to check
+ *
+ * This function also documents devices where EEE was initially advertised but
+ * later withdrawn due to reliability issues, as described in official errata
+ * documents. These devices are explicitly listed to record known limitations,
+ * even if there is no technical necessity for runtime checks.
+ *
+ * Returns: true if the internal PHY on the given port supports fully
+ * operational EEE, false otherwise.
+ */
static bool ksz_support_eee(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
@@ -3475,15 +3534,35 @@ static bool ksz_support_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ return true;
case KSZ8567_CHIP_ID:
+ /* KSZ8567R Errata DS80000752C Module 4 */
+ case KSZ8765_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8795_CHIP_ID:
+ /* KSZ879x/KSZ877x/KSZ876x Errata DS80000687C Module 2 */
case KSZ9477_CHIP_ID:
- case KSZ9563_CHIP_ID:
+ /* KSZ9477S Errata DS80000754A Module 4 */
case KSZ9567_CHIP_ID:
- case KSZ9893_CHIP_ID:
+ /* KSZ9567S Errata DS80000756A Module 4 */
case KSZ9896_CHIP_ID:
+ /* KSZ9896C Errata DS80000757A Module 3 */
case KSZ9897_CHIP_ID:
case LAN9646_CHIP_ID:
- return true;
+ /* KSZ9897R Errata DS80000758C Module 4 */
+ /* Energy Efficient Ethernet (EEE) feature select must be
+ * manually disabled.
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for all switches above.
+ */
+ break;
}
return false;
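The hunks above replace the old MICREL_NO_EEE PHY-flag approach with phylink-managed LPI: ports whose EEE is fully operational return true from ksz_support_eee(), while the errata-affected parts fall through to false so phylink never offers EEE on them. Below is a minimal sketch of the capability wiring done in ksz_phylink_get_caps(); it is illustrative only, with gbit_capable standing in for the driver's per-port capability table.

#include <linux/phylink.h>
#include <linux/string.h>

static void demo_fill_lpi_caps(struct phylink_config *config,
			       bool gbit_capable)
{
	/* LPI may be signalled on every interface mode the port supports */
	memcpy(config->lpi_interfaces, config->supported_interfaces,
	       sizeof(config->lpi_interfaces));

	/* Advertise LPI only at the speeds the port can actually run */
	config->lpi_capabilities = MAC_100FD;
	if (gbit_capable)
		config->lpi_capabilities |= MAC_1000FD;

	/* Let phylink turn EEE on by default for this port */
	config->eee_enabled_default = true;
}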
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 5883eb93efb1..22513f3d56db 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2541,6 +2541,9 @@ mt7531_setup_common(struct dsa_switch *ds)
struct mt7530_priv *priv = ds->priv;
int ret, i;
+ ds->assisted_learning_on_cpu_port = true;
+ ds->mtu_enforcement_ingress = true;
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2688,9 +2691,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ds->assisted_learning_on_cpu_port = true;
- ds->mtu_enforcement_ingress = true;
-
return 0;
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 940f1b71226d..7b35d24c38d7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
- int port;
+ int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
+ for (i = 0; i < taprio->num_entries; i++)
+ vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index f8454f3b6f9c..f674c400f05b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2081,6 +2081,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
/* From UM10944 description of DRPDTAG (why put this there?):
* "Management traffic flows to the port regardless of the state
* of the INGRESS flag". So BPDUs are still allowed to pass.
@@ -2090,11 +2091,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
mac[port].egress = false;
mac[port].dyn_learn = false;
break;
- case BR_STATE_LISTENING:
- mac[port].ingress = true;
- mac[port].egress = false;
- mac[port].dyn_learn = false;
- break;
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index c83a0a80d533..506f682d15c1 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -5,11 +5,6 @@
#include "core.h"
-struct pdsc_wait_context {
- struct pdsc_qcq *qcq;
- struct completion wait_completion;
-};
-
static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
{
union pds_core_notifyq_comp *comp;
@@ -109,10 +104,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
q_info = &q->info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- /* Copy out the completion data */
- memcpy(q_info->dest, comp, sizeof(*comp));
-
- complete_all(&q_info->wc->wait_completion);
+ if (!completion_done(&q_info->completion)) {
+ memcpy(q_info->dest, comp, sizeof(*comp));
+ complete(&q_info->completion);
+ }
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
@@ -162,8 +157,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
static int __pdsc_adminq_post(struct pdsc *pdsc,
struct pdsc_qcq *qcq,
union pds_core_adminq_cmd *cmd,
- union pds_core_adminq_comp *comp,
- struct pdsc_wait_context *wc)
+ union pds_core_adminq_comp *comp)
{
struct pdsc_queue *q = &qcq->q;
struct pdsc_q_info *q_info;
@@ -205,9 +199,9 @@ static int __pdsc_adminq_post(struct pdsc *pdsc,
/* Post the request */
index = q->head_idx;
q_info = &q->info[index];
- q_info->wc = wc;
q_info->dest = comp;
memcpy(q_info->desc, cmd, sizeof(*cmd));
+ reinit_completion(&q_info->completion);
dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
q->head_idx, q->tail_idx);
@@ -231,16 +225,13 @@ int pdsc_adminq_post(struct pdsc *pdsc,
union pds_core_adminq_comp *comp,
bool fast_poll)
{
- struct pdsc_wait_context wc = {
- .wait_completion =
- COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
- };
unsigned long poll_interval = 1;
unsigned long poll_jiffies;
unsigned long time_limit;
unsigned long time_start;
unsigned long time_done;
unsigned long remaining;
+ struct completion *wc;
int err = 0;
int index;
@@ -250,20 +241,19 @@ int pdsc_adminq_post(struct pdsc *pdsc,
return -ENXIO;
}
- wc.qcq = &pdsc->adminqcq;
- index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp);
if (index < 0) {
err = index;
goto err_out;
}
+ wc = &pdsc->adminqcq.q.info[index].completion;
time_start = jiffies;
time_limit = time_start + HZ * pdsc->devcmd_timeout;
do {
/* Timeslice the actual wait to catch IO errors etc early */
poll_jiffies = msecs_to_jiffies(poll_interval);
- remaining = wait_for_completion_timeout(&wc.wait_completion,
- poll_jiffies);
+ remaining = wait_for_completion_timeout(wc, poll_jiffies);
if (remaining)
break;
@@ -292,9 +282,11 @@ int pdsc_adminq_post(struct pdsc *pdsc,
dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
__func__, jiffies_to_msecs(time_done - time_start));
- /* Check the results */
- if (time_after_eq(time_done, time_limit))
+ /* Check the results and clear an un-completed timeout */
+ if (time_after_eq(time_done, time_limit) && !completion_done(wc)) {
err = -ETIMEDOUT;
+ complete(wc);
+ }
dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 2babea110991..889a18962270 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -107,9 +107,6 @@ int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
dev_dbg(pf->dev, "%s: %s opcode %d\n",
__func__, dev_name(&padev->aux_dev.dev), req->opcode);
- if (pf->state)
- return -ENXIO;
-
/* Wrap the client's request */
cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
cmd.client_request.client_id = cpu_to_le16(padev->client_id);
@@ -175,34 +172,31 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
- int err = 0;
- if (!cf)
- return -ENODEV;
+ if (!*pd_ptr)
+ return;
mutex_lock(&pf->config_lock);
- padev = pf->vfs[cf->vf_id].padev;
- if (padev) {
- pds_client_unregister(pf, padev->client_id);
- auxiliary_device_delete(&padev->aux_dev);
- auxiliary_device_uninit(&padev->aux_dev);
- padev->client_id = 0;
- }
- pf->vfs[cf->vf_id].padev = NULL;
+ padev = *pd_ptr;
+ pds_client_unregister(pf, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ *pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
- return err;
}
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
char devname[PDS_DEVNAME_LEN];
- enum pds_core_vif_types vt;
unsigned long mask;
u16 vt_support;
int client_id;
@@ -211,6 +205,9 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
if (!cf)
return -ENODEV;
+ if (vt >= PDS_DEV_TYPE_MAX)
+ return -EINVAL;
+
mutex_lock(&pf->config_lock);
mask = BIT_ULL(PDSC_S_FW_DEAD) |
@@ -222,17 +219,10 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
goto out_unlock;
}
- /* We only support vDPA so far, so it is the only one to
- * be verified that it is available in the Core device and
- * enabled in the devlink param. In the future this might
- * become a loop for several VIF types.
- */
-
/* Verify that the type is supported and enabled. It is not
* an error if there is no auxbus device support for this
* VF, it just means something else needs to happen with it.
*/
- vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -258,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
- pf->vfs[cf->vf_id].padev = padev;
+ *pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 536635e57727..3c60d4cf9d0e 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -167,8 +167,10 @@ static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
q->base = base;
q->base_pa = base_pa;
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) {
cur->desc = base + (i * q->desc_size);
+ init_completion(&cur->completion);
+ }
}
static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
@@ -325,10 +327,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
size_t sz;
int err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
+ numdescs = PDSC_ADMINQ_MAX_LENGTH;
err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
numdescs,
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 14522d6d5f86..becd3104473c 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -16,7 +16,7 @@
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 16
-#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_ADMINQ_MAX_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false
#define PDSC_TEARDOWN_REMOVING true
@@ -96,7 +96,7 @@ struct pdsc_q_info {
unsigned int bytes;
unsigned int nbufs;
struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
- struct pdsc_wait_context *wc;
+ struct completion completion;
void *dest;
};
@@ -303,8 +303,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+ enum pds_core_vif_types vt,
+ struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+ struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 44971e71991f..d8dc39da4161 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -56,8 +56,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
- err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
- pdsc_auxbus_dev_del(vf, pdsc);
+ if (ctx->val.vbool)
+ err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+ &pdsc->vfs[vf_id].padev);
+ else
+ pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;
@@ -102,7 +105,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
.fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
.fw_control.oper = PDS_CORE_FW_GET_LIST,
};
- struct pds_core_fw_list_info fw_list;
+ struct pds_core_fw_list_info fw_list = {};
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[32];
@@ -115,8 +118,6 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (!err)
memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
mutex_unlock(&pdsc->devcmd_lock);
- if (err && err != -EIO)
- return err;
listlen = min(fw_list.num_fw_slots, ARRAY_SIZE(fw_list.fw_names));
for (i = 0; i < listlen; i++) {
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 660268ff9562..a3a68889137b 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -190,7 +190,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
- err = pdsc_auxbus_dev_add(vf, pf);
+ err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -417,7 +418,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -482,7 +483,8 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_del(pdsc, pf);
+ pdsc_auxbus_dev_del(pdsc, pf,
+ &pf->vfs[pdsc->vf_id].padev);
}
pdsc_unmap_bars(pdsc);
@@ -527,7 +529,8 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
- pdsc_auxbus_dev_add(pdsc, pf);
+ pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+ &pf->vfs[pdsc->vf_id].padev);
}
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 230726d7b74f..d41b58fad37b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+ } else {
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ pdata->rx_buf_size);
+ }
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f393228d41c7..f1b0fb02b3cd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
+static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!pdata->channel[i]->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
+ }
+}
+
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
- xgbe_config_sph_mode(pdata);
- xgbe_config_rss(pdata);
+
+ if (pdata->netdev->features & NETIF_F_RXCSUM) {
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ }
+
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
+ /* For Split Header */
+ hw_if->enable_sph = xgbe_config_sph_mode;
+ hw_if->disable_sph = xgbe_disable_sph_mode;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 5475867708f4..8bc49259d71a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ if ((features & NETIF_F_RXCSUM) && !rxcsum) {
+ hw_if->enable_sph(pdata);
+ hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ schedule_work(&pdata->restart_work);
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
+ hw_if->disable_sph(pdata);
+ hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
+ schedule_work(&pdata->restart_work);
+ }
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d85386cac8d1..ed5d43c16d0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -865,6 +865,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
+
+ /* For Split Header */
+ void (*enable_sph)(struct xgbe_prv_data *pdata);
+ void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an
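Split header (SPH) depends on RX checksum offload in this hardware, so the set_features path above flips SPH, VXLAN offload and RX checksum together and schedules a restart so the RX header buffers get re-sized (see the xgbe_map_rx_buffer change earlier). A condensed sketch of that coupling; the types and ops names here are illustrative, not the driver's:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv;

struct demo_hw_ops {
	void (*set_sph)(struct demo_priv *priv, bool on);
	void (*set_rx_csum)(struct demo_priv *priv, bool on);
};

struct demo_priv {
	const struct demo_hw_ops *hw_ops;
	struct work_struct restart_work;
};

static int demo_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	struct demo_priv *priv = netdev_priv(ndev);
	bool had = !!(ndev->features & NETIF_F_RXCSUM);
	bool want = !!(features & NETIF_F_RXCSUM);

	if (want != had) {
		priv->hw_ops->set_sph(priv, want);
		priv->hw_ops->set_rx_csum(priv, want);
		/* buffer sizing changed: restart to re-init the RX rings */
		schedule_work(&priv->restart_work);
	}
	return 0;
}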
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1b39574e3fa2..d844cf621dd2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2011,6 +2011,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type,
}
return skb;
vlan_err:
+ skb_mark_for_recycle(skb);
dev_kfree_skb(skb);
return NULL;
}
@@ -3403,6 +3404,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
+
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ bnxt_ptp_free_txts_skbs(bp->ptp_cfg);
}
static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -5585,6 +5589,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -8385,6 +8391,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -9549,6 +9556,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -11376,6 +11385,9 @@ static void bnxt_init_napi(struct bnxt *bp)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
+
+ set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
+
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
@@ -12097,6 +12109,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
struct hwrm_func_drv_if_change_input *req;
bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
u32 flags = 0;
@@ -12152,8 +12165,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
@@ -12165,13 +12181,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
+ /* IRQ will be initialized later in bnxt_request_irq() */
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
- if (rc) {
- clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
- netdev_err(bp->dev, "init int mode failed\n");
- return rc;
- }
}
rc = bnxt_cancel_reservations(bp, fw_reset);
}
@@ -12570,8 +12581,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
- WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
@@ -15731,8 +15740,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_rdma_aux_device_del(bp);
- bnxt_ptp_clear(bp);
unregister_netdev(dev);
+ bnxt_ptp_clear(bp);
bnxt_rdma_aux_device_uninit(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d621fb621f30..f91d9d8eacb9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2498,6 +2498,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
u32 fw_dbg_cap;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..a73398c4a3e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
+ u16 copylen = min_t(u16, len,
+ info->dest_buf_size - off);
+
+ memcpy(info->dest_buf + off, dma_buf, copylen);
+ if (copylen < len)
+ break;
} else {
rc = -ENOBUFS;
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ kfree(info->dest_buf);
+ info->dest_buf = NULL;
+ }
break;
}
}
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9c5820839514..54208e049598 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2062,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
return reg_len;
}
+#define BNXT_PCIE_32B_ENTRY(start, end) \
+ { offsetof(struct pcie_ctx_hw_stats, start), \
+ offsetof(struct pcie_ctx_hw_stats, end) }
+
+static const struct {
+ u16 start;
+ u16 end;
+} bnxt_pcie_32b_entries[] = {
+ BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+};
+
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
@@ -2094,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
- __le64 *src = (__le64 *)hw_pcie_stats;
- u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
- int i;
-
- for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
- dst[i] = le64_to_cpu(src[i]);
+ u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+ u8 *src = (u8 *)hw_pcie_stats;
+ int i, j;
+
+ for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+ if (i >= bnxt_pcie_32b_entries[j].start &&
+ i <= bnxt_pcie_32b_entries[j].end) {
+ u32 *dst32 = (u32 *)(dst + i);
+
+ *dst32 = le32_to_cpu(*(__le32 *)(src + i));
+ i += 4;
+ if (i > bnxt_pcie_32b_entries[j].end &&
+ j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+ j++;
+ } else {
+ u64 *dst64 = (u64 *)(dst + i);
+
+ *dst64 = le64_to_cpu(*(__le64 *)(src + i));
+ i += 8;
+ }
+ }
}
hwrm_req_drop(bp, req);
}
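The old loop byte-swapped the whole PCIe stats struct as a flat array of __le64, which mangles fields that are really 32-bit: on big-endian hosts each pair of adjacent __le32 entries would be swapped across each other. A toy example of converting mixed widths at their own offsets, the idea the offsetof() table above encodes (struct and field names invented for illustration):

#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_stats {
	__le64 rx_pkts;
	__le32 histogram[4];	/* four 32-bit entries in the stream */
	__le64 tx_pkts;
};

static void demo_stats_to_cpu(const struct demo_stats *src, u8 *dst)
{
	u32 *d32 = (u32 *)(dst + offsetof(struct demo_stats, histogram));
	int i;

	*(u64 *)(dst + offsetof(struct demo_stats, rx_pkts)) =
		le64_to_cpu(src->rx_pkts);
	for (i = 0; i < 4; i++)
		d32[i] = le32_to_cpu(src->histogram[i]);
	*(u64 *)(dst + offsetof(struct demo_stats, tx_pkts)) =
		le64_to_cpu(src->tx_pkts);
}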
@@ -4922,6 +4948,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
if (etest->flags & ETH_TEST_FL_OFFLINE &&
bnxt_ulp_registered(bp->edev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4929,7 +4956,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
return;
}
- memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 2d4e19b96ee7..0669d43472f5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -794,6 +794,27 @@ next_slot:
return HZ;
}
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp)
+{
+ struct bnxt_ptp_tx_req *txts_req;
+ u16 cons = ptp->txts_cons;
+
+ /* Make sure the PTP aux worker has finished; it may still be
+ * running with BNXT_STATE_OPEN set
+ */
+ ptp_cancel_worker_sync(ptp->ptp_clock);
+
+ ptp->tx_avail = BNXT_MAX_TX_TS;
+ while (cons != ptp->txts_prod) {
+ txts_req = &ptp->txts_req[cons];
+ if (!IS_ERR_OR_NULL(txts_req->tx_skb))
+ dev_kfree_skb_any(txts_req->tx_skb);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
+ ptp_schedule_worker(ptp->ptp_clock, 0);
+}
+
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
{
spin_lock_bh(&ptp->ptp_tx_lock);
@@ -1105,7 +1126,6 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- int i;
if (!ptp)
return;
@@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- for (i = 0; i < BNXT_MAX_TX_TS; i++) {
- if (ptp->txts_req[i].tx_skb) {
- dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
- ptp->txts_req[i].tx_skb = NULL;
- }
- }
-
bnxt_unmap_ptp_regs(bp);
}
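Freeing pending timestamp skbs now happens when the TX rings are freed, by draining the producer/consumer ring after stopping the worker that could still be consuming it. A generic sketch of that drain step; the ring layout here is illustrative:

#include <linux/err.h>
#include <linux/netdevice.h>

#define DEMO_RING_SZ	4	/* power of two */
#define DEMO_NEXT(i)	(((i) + 1) & (DEMO_RING_SZ - 1))

struct demo_ts_ring {
	struct sk_buff *skb[DEMO_RING_SZ];
	u16 prod, cons;
};

static void demo_ts_ring_drain(struct demo_ts_ring *r)
{
	u16 cons = r->cons;

	while (cons != r->prod) {
		if (!IS_ERR_OR_NULL(r->skb[cons]))
			dev_kfree_skb_any(r->skb[cons]);
		r->skb[cons] = NULL;
		cons = DEMO_NEXT(cons);
	}
	r->cons = cons;		/* ring is now empty */
}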
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index a95f05e9c579..0481161d26ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c1f57d96e63f..e3cc26472c2f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1002,22 +1002,15 @@ static void macb_update_stats(struct macb *bp)
static int macb_halt_tx(struct macb *bp)
{
- unsigned long halt_time, timeout;
- u32 status;
+ u32 status;
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
- timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
- do {
- halt_time = jiffies;
- status = macb_readl(bp, TSR);
- if (!(status & MACB_BIT(TGO)))
- return 0;
-
- udelay(250);
- } while (time_before(halt_time, timeout));
-
- return -ETIMEDOUT;
+ /* Poll TSR until TGO is cleared or timeout. */
+ return read_poll_timeout_atomic(macb_readl, status,
+ !(status & MACB_BIT(TGO)),
+ 250, MACB_HALT_TIMEOUT, false,
+ bp, TSR);
}
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
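read_poll_timeout_atomic() from <linux/iopoll.h> packages exactly the loop that was open-coded here: poll op(args...) into val every delay_us microseconds until cond holds or timeout_us expires, returning 0 on success or -ETIMEDOUT. A minimal usage sketch; the register and bit below are invented:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int demo_wait_tx_halted(void __iomem *status_reg)
{
	u32 val;

	/* poll readl(status_reg) every 250 us, up to 1000 us in total */
	return read_poll_timeout_atomic(readl, val, !(val & BIT(3)),
					250, 1000, false, status_reg);
}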
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index d0ea92607870..6bf8a7aeef90 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
- np->led_mode = psrom->led_mode;
+ np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 195dc6cfd895..0e33e2eaae96 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -335,7 +335,7 @@ typedef struct t_SROM {
u16 sub_system_id; /* 0x06 */
u16 pci_base_1; /* 0x08 (IP1000A only) */
u16 pci_base_2; /* 0x0a (IP1000A only) */
- u16 led_mode; /* 0x0c (IP1000A only) */
+ __le16 led_mode; /* 0x0c (IP1000A only) */
u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */
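The fix above is the standard sparse-clean pattern for on-wire or EEPROM data: declare the field with its storage endianness (__le16) and convert exactly once at the point of use. In isolation, with an invented struct:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_srom {
	__le16 led_mode;	/* stored little-endian in the EEPROM */
};

static u16 demo_read_led_mode(const struct demo_srom *srom)
{
	return le16_to_cpu(srom->led_mode);	/* host order for callers */
}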
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0d030cb0b21c..63aeb400051c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -67,6 +67,8 @@
#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(12)
+#define TSNEP_TX_TYPE_TSTAMP BIT(13)
+#define TSNEP_TX_TYPE_SKB_TSTAMP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_TSTAMP)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -387,8 +389,7 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if ((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -480,7 +481,8 @@ static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
return mapped;
}
-static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count,
+ bool do_tstamp)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
@@ -506,6 +508,9 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
entry->type = TSNEP_TX_TYPE_SKB_INLINE;
mapped = 0;
}
+
+ if (do_tstamp)
+ entry->type |= TSNEP_TX_TYPE_TSTAMP;
} else {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -559,11 +564,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- int count = 1;
struct tsnep_tx_entry *entry;
+ bool do_tstamp = false;
+ int count = 1;
int length;
- int i;
int retval;
+ int i;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
@@ -580,7 +586,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry = &tx->entry[tx->write];
entry->skb = skb;
- retval = tsnep_tx_map(skb, tx, count);
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ tx->adapter->hwtstamp_config.tx_type == HWTSTAMP_TX_ON) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ do_tstamp = true;
+ }
+
+ retval = tsnep_tx_map(skb, tx, count, do_tstamp);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
@@ -592,9 +604,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
}
length = retval;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
@@ -845,8 +854,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((entry->type & TSNEP_TX_TYPE_SKB) &&
- (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if (((entry->type & TSNEP_TX_TYPE_SKB_TSTAMP) == TSNEP_TX_TYPE_SKB_TSTAMP) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
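The driver can no longer trust SKBTX_IN_PROGRESS at completion time, because an upper driver (e.g. a stacked DSA switch) may set that flag on the same skb for its own timestamp; instead, "this MAC armed a hardware timestamp" is latched into the descriptor type when the frame is queued, and only that latch is tested on completion. A sketch of the latch with illustrative bit values:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TYPE_SKB		BIT(0)
#define DEMO_TYPE_TSTAMP	BIT(13)
#define DEMO_TYPE_SKB_TSTAMP	(DEMO_TYPE_SKB | DEMO_TYPE_TSTAMP)

static bool demo_wants_hw_tstamp(u32 entry_type)
{
	/* both bits required: an skb this MAC queued *and* armed itself */
	return (entry_type & DEMO_TYPE_SKB_TSTAMP) == DEMO_TYPE_SKB_TSTAMP;
}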
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 2106861463e4..3ee52f4b1166 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1850,6 +1850,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
}
}
+static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first,
+ int rx_ring_last)
+{
+ while (rx_ring_first != rx_ring_last) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[rx_ring_first]);
+ enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
+ }
+}
+
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1868,11 +1878,10 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
- int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
+ int orig_i, err;
u32 bd_status;
- int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1887,7 +1896,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
break;
orig_rxbd = rxbd;
- orig_cleaned_cnt = cleaned_cnt;
orig_i = i;
enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
@@ -1915,15 +1923,21 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
rx_ring->stats.xdp_drops++;
break;
case XDP_PASS:
- rxbd = orig_rxbd;
- cleaned_cnt = orig_cleaned_cnt;
- i = orig_i;
-
- skb = enetc_build_skb(rx_ring, bd_status, &rxbd,
- &i, &cleaned_cnt,
- ENETC_RXB_DMA_SIZE_XDP);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&xdp_buff);
+ /* Probably under memory pressure, stop NAPI */
+ if (unlikely(!skb)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_drops++;
goto out;
+ }
+
+ enetc_get_offloads(rx_ring, orig_rxbd, skb);
+
+ /* These buffers are about to be owned by the stack.
+ * Update our buffer cache (the rx_swbd array elements)
+ * with their other page halves.
+ */
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
napi_gro_receive(napi, skb);
break;
@@ -1965,11 +1979,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
} else {
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
+ enetc_bulk_flip_buff(rx_ring, orig_i, i);
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -3362,7 +3372,8 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
bdr->buffer_offset = ENETC_RXB_PAD;
priv->rx_ring[i] = bdr;
- err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+ ENETC_RXB_DMA_SIZE_XDP);
if (err)
goto free_vector;
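xdp_build_skb_from_buff() builds the skb straight from the xdp_buff the program just inspected, replacing the old re-walk of the RX ring; the driver then only hands its cached page halves back through the new bulk flip helper. A minimal usage sketch of the XDP_PASS step, with error handling reduced to a NULL return for the caller to drop:

#include <linux/netdevice.h>
#include <net/xdp.h>

static struct sk_buff *demo_xdp_pass(struct napi_struct *napi,
				     struct xdp_buff *xdp)
{
	struct sk_buff *skb = xdp_build_skb_from_buff(xdp);

	if (unlikely(!skb))
		return NULL;	/* memory pressure: caller recycles + drops */

	napi_gro_receive(napi, skb);
	return skb;
}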
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c4ce8e9a26..17e9bddb9ddd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, txq->bd.reg_desc_active);
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
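ERR007885-class errata say a write to the DMA "descriptor active" doorbell can be ignored if it races with the controller clearing the bit, so the quirk path re-reads the register up to four times and only issues the kick once a read observed it clear; if every read saw it set, DMA is still running and no kick is needed. The same guard in isolation (names illustrative):

#include <linux/io.h>
#include <linux/types.h>

static void demo_kick_tx(void __iomem *doorbell, bool quirky)
{
	int tries = 4;

	if (!quirky) {
		writel(0, doorbell);
		return;
	}

	while (tries--) {
		if (!readl(doorbell)) {		/* observed idle: safe */
			writel(0, doorbell);
			return;
		}
	}
	/* every read saw the bit set: DMA already active, no kick needed */
}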
@@ -1093,6 +1098,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * On i.MX6SX SoCs the ENET block sits on the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1109,17 +1137,7 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
@@ -1373,22 +1391,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9bbece25552b..3d70c97a0bed 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ff797fb36c4..b03b8758c777 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
-static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
-
- /* enable vector */
- hns3_mask_vector_irq(tqp_vector, 1);
}
-static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
- /* disable vector */
- hns3_mask_vector_irq(tqp_vector, 0);
-
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
+static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+}
+
+static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- /* enable the vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- /* enable rcb */
- for (j = 0; j < h->kinfo.num_tqps; j++)
- hns3_tqp_enable(h->kinfo.tqp[j]);
+ hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- while (j--)
- hns3_tqp_disable(h->kinfo.tqp[j]);
-
- for (j = i - 1; j >= 0; j--)
- hns3_vector_disable(&priv->tqp_vector[j]);
+ hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
- int i;
- /* disable vectors */
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- /* disable rcb */
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_disable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_disable(h->kinfo.tqp[i]);
+ hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- int i;
if (!if_running)
return;
@@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
- for (i = 0; i < priv->vector_num; i++)
- hns3_vector_enable(&priv->tqp_vector[i]);
-
- for (i = 0; i < h->kinfo.num_tqps; i++)
- hns3_tqp_enable(h->kinfo.tqp[i]);
+ hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);
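The new helpers pin down one ordering for every caller: enable NAPI and the IRQ first, then unmask the vectors at the hardware, then start the queues; tear-down mirrors it exactly in reverse so no interrupt can fire into a disabled NAPI context. A condensed sketch of the bring-up half; the mask-register write is a hypothetical helper, and tear-down is the same loops reversed:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_vector {
	struct napi_struct napi;
	int irq;
};

static void demo_unmask_vector(struct demo_vector *v)
{
	/* hypothetical: write 0 to this vector's interrupt mask register */
}

static void demo_up(struct demo_vector *vec, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++) {
		napi_enable(&vec[i].napi);	/* software side ready... */
		enable_irq(vec[i].irq);
	}
	for (i = 0; i < nvec; i++)
		demo_unmask_vector(&vec[i]);	/* ...before HW can fire */
	/* finally start the hardware queues (TQPs) */
}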
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 181af419b878..0ffda5146bae 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -439,6 +439,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -450,12 +457,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
- spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
- ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
- ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
- hdev->ptp = ptp;
-
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9ba767740a04..dada42e7e0ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1292,9 +1292,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+ if (ret)
+ return ret;
+
+ hdev->rxvtag_strip_en = enable;
+ return 0;
+}
+
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -2204,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+ bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
- ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2879,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2994,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- ret = hclgevf_init_vlan_config(hdev);
+ ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index cccef3228461..0208425ab594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
+ bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
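Hard-coding true in the reset path used to re-enable VLAN tag stripping even when the user had turned it off; the fix caches the last requested state and replays it after a reset. The generic replay-after-reset shape, with a hypothetical firmware call standing in for the mailbox message:

#include <linux/types.h>

static int demo_fw_set_strip(bool en)
{
	return 0;	/* hypothetical firmware/mailbox call */
}

struct demo_dev {
	bool strip_en;	/* last state requested via set_features */
};

static int demo_set_strip(struct demo_dev *d, bool en)
{
	int ret = demo_fw_set_strip(en);

	if (ret)
		return ret;
	d->strip_en = en;	/* cache only on success */
	return 0;
}

static int demo_rebuild_after_reset(struct demo_dev *d)
{
	/* replay the user's last choice, not a hard-coded default */
	return demo_fw_set_strip(d->strip_en);
}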
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 71e05d30f0fd..d7b90a77bb49 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -1047,10 +1047,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
-{
- return hw->ptp.phy_model;
-}
-
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 01a08cfd0090..66e070095d1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
-#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -14,32 +13,16 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
-#define INDEX_FIELD_DEV GENMASK(31, 16)
-#define INDEX_FIELD_BUS GENMASK(12, 5)
-#define INDEX_FIELD_SLOT GENMASK(4, 0)
-
-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+static unsigned long ice_adapter_index(u64 dsn)
{
- unsigned int domain = pci_domain_nr(pdev->bus);
-
- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
-
- switch (pdev->device) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
- default:
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
- }
+#if BITS_PER_LONG == 64
+ return dsn;
+#else
+ return (u32)dsn ^ (u32)(dsn >> 32);
+#endif
}
-static struct ice_adapter *ice_adapter_new(void)
+static struct ice_adapter *ice_adapter_new(u64 dsn)
{
struct ice_adapter *adapter;
@@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
+ adapter->device_serial_number = dsn;
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
@@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
int err;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->device_serial_number != dsn);
return adapter;
}
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new();
+ adapter = ice_adapter_new(dsn);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
-void ice_adapter_put(const struct pci_dev *pdev)
+void ice_adapter_put(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
+ u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
+ index = ice_adapter_index(dsn);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index e233225848b3..ac15c0d2bc1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -32,6 +32,7 @@ struct ice_port_list {
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
+ * @device_serial_number: DSN cached for collision detection on 32-bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -40,9 +41,10 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
+ u64 device_serial_number;
};
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
-void ice_adapter_put(const struct pci_dev *pdev);
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */
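Keying the adapter xarray by the PCI device serial number lets all ports of one physical adapter share a struct ice_adapter regardless of bus topology. Since xarray indices are unsigned long, 32-bit hosts fold the 64-bit DSN and keep the full value around to detect collisions, as the WARN_ON_ONCE in ice_adapter_get() shows. The fold in isolation:

#include <linux/bits.h>
#include <linux/types.h>

static unsigned long demo_fold_dsn(u64 dsn)
{
#if BITS_PER_LONG == 64
	return dsn;				/* lossless */
#else
	/* lossy fold: callers keep the full DSN to detect collisions */
	return (u32)dsn ^ (u32)(dsn >> 32);
#endif
}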
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 1e801300310e..59df31c2c83f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -186,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -195,120 +195,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810T based, false if not.
- */
-bool ice_is_e810t(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
-
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*
@@ -2409,7 +2295,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@@ -5766,6 +5652,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent
+ * calls return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If the handle was read previously, return the cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If the handle is not cached yet, read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = &desc.params.get_link_topo;
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present, save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 15ba38543738..9b00aa0ddf10 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -131,7 +131,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -276,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -306,5 +301,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
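The newly exported pair follows a query-once pattern: the first ice_get_pca9575_handle() call issues the get_link_topo admin queue command and stashes the result in hw->io_expander_handle, so every later call, including each ice_read_pca9575_reg(), is served from the cache. A hedged usage sketch (read_gnss_present_pin is hypothetical):

static int read_gnss_present_pin(struct ice_hw *hw, u8 *pin_state)
{
	u16 handle;
	int err;

	/* First call queries firmware; later calls hit the cached handle */
	err = ice_get_pca9575_handle(hw, &handle);
	if (err)
		return err;	/* -ENXIO: no PCA9575 in the netlist */

	return ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, pin_state);
}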
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..59323c019544 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
- }
- if (!ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ }
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f241493a6ac8..6bbb304ad9ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3817,8 +3817,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index b2148dbe49b2..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
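Note the _N suffix on ICE_P0_GNSS_PRSNT_N: the presence pin is active-low, so a set bit means the module is absent. Spelled out as a sketch:

	/* data is the raw P0 input port value read from the PCA9575 */
	bool present = !(data & ICE_P0_GNSS_PRSNT_N);	/* low = present */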
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..09f9c7ba5279 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -45,7 +45,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +53,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +67,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* restrict to preallocated (static) entries if dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,7 +79,7 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
@@ -272,7 +273,7 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,20 +286,20 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with the dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
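Both bounds of a struct xa_limit are inclusive, which is what the "- 1" adjustments above account for: with dynamic allocation allowed the window is [0, num_entries - 1], otherwise it is clamped to the statically preallocated range. A condensed sketch of the same logic:

	struct xa_limit limit = {
		.min = 0,
		.max = pf->irq_tracker.num_entries - 1,	/* inclusive */
	};

	if (!dyn_allowed)
		limit.max = pf->irq_tracker.num_static - 1;

	ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
		       GFP_KERNEL);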
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 22371011c249..2410aee59fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1321,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d0faa087793d..021ed7451bb9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -567,6 +567,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -3882,7 +3884,7 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e13bd5a6cb6c..d24d46b24e37 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5186,11 +5186,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5198,6 +5199,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5221,8 +5223,8 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a99e0fbd0b8b..92ce419ff0bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1318,20 +1318,20 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1361,19 +1361,16 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1398,6 +1395,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1432,12 +1432,13 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
/* Do not reconfigure E810 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1465,46 +1466,44 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1740,7 +1739,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@@ -1825,7 +1824,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -2080,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2104,7 +2103,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2558,7 +2557,7 @@ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@@ -2646,10 +2645,17 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2779,7 +2785,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2914,14 +2920,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2973,8 +2977,9 @@ err:
static bool ice_is_primary(struct ice_hw *hw)
{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
+ !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
+ true;
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
@@ -2992,7 +2997,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -3009,7 +3014,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3136,18 +3141,18 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
+ ptp_port->port_num);
default:
return -ENODEV;
}
@@ -3164,8 +3169,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
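With enum ice_phy_model gone, every PHY-specific dispatch in the PTP code keys off hw->mac_type, and the mapping is consistent throughout: ICE_MAC_E810 uses the e810 helpers, ICE_MAC_GENERIC the e82x helpers, and ICE_MAC_GENERIC_3K_E825 the eth56g helpers. An illustrative helper showing that mapping (hypothetical, not part of the patch):

static const char *ice_mac_type_str(enum ice_mac_type mac_type)
{
	switch (mac_type) {
	case ICE_MAC_E810:
		return "E810";		/* e810 PHY helpers */
	case ICE_MAC_GENERIC:
		return "E82X";		/* e82x PHY helpers */
	case ICE_MAC_GENERIC_3K_E825:
		return "E825C";		/* eth56g PHY helpers */
	default:
		return "unknown";
	}
}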
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec91822e9280..8475d422f1ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
int err;
/* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
else
err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
@@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
else
@@ -827,8 +827,8 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -2729,10 +2729,7 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u32 phy_rev;
- int err;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
@@ -2742,9 +2739,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G)
- ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -4792,7 +4786,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -5316,68 +5309,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5442,37 +5373,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5538,7 +5438,6 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
@@ -5547,9 +5446,8 @@ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5612,14 +5510,19 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_init_phy_e825(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5640,11 +5543,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5705,8 +5608,8 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
default:
break;
@@ -5784,17 +5687,17 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5830,16 +5733,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5899,16 +5802,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5932,13 +5835,13 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5962,13 +5865,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -6025,14 +5928,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -6054,13 +5957,13 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_phc_eth56g(hw);
default:
return -EOPNOTSUPP;
}
@@ -6079,16 +5982,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6779ce120515..6b4679407558 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -395,7 +395,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -431,13 +430,13 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 33a1a5934c0d..0aab21113cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -871,14 +871,6 @@ union ice_phy_params {
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -888,7 +880,6 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1af51469f070..9be9ce300fa4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -4211,7 +4211,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 9be4bd717512..f90f545b3144 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -2097,6 +2097,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 66544faab710..70dbf80f3bb7 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -143,6 +143,7 @@ enum idpf_vport_state {
* @vport_id: Vport identifier
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
+ * @max_tx_hdr_size: Max header length the hardware can support
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -153,6 +154,7 @@ struct idpf_netdev_priv {
u32 vport_id;
u32 link_speed_mbps;
u16 vport_idx;
+ u16 max_tx_hdr_size;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -629,13 +631,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
-#define IDPF_CAP_RX_CSUM_L4V4 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+#define IDPF_CAP_TX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP)
-#define IDPF_CAP_RX_CSUM_L4V6 (\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+#define IDPF_CAP_TX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP)
#define IDPF_CAP_RX_CSUM (\
VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
@@ -644,11 +646,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
-#define IDPF_CAP_SCTP_CSUM (\
+#define IDPF_CAP_TX_SCTP_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
- VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP)
#define IDPF_CAP_TUNNEL_TX_CSUM (\
VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index a055a47449f1..df71e6ad6510 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
+ netdev_features_t other_offloads = 0;
+ netdev_features_t csum_offloads = 0;
+ netdev_features_t tso_offloads = 0;
netdev_features_t dflt_features;
- netdev_features_t offloads = 0;
struct idpf_netdev_priv *np;
struct net_device *netdev;
u16 idx = vport->idx;
@@ -721,6 +723,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport = vport;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
vport->netdev = netdev;
return idpf_init_mac_addr(vport, netdev);
@@ -738,6 +741,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->adapter = adapter;
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
+ np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
spin_lock_init(&np->stats_lock);
@@ -766,53 +770,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
- dflt_features |= NETIF_F_IP_CSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
- dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
+ csum_offloads |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
+ csum_offloads |= NETIF_F_IPV6_CSUM;
if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
- dflt_features |= NETIF_F_RXCSUM;
- if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
- dflt_features |= NETIF_F_SCTP_CRC;
+ csum_offloads |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM))
+ csum_offloads |= NETIF_F_SCTP_CRC;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
- dflt_features |= NETIF_F_TSO;
+ tso_offloads |= NETIF_F_TSO;
if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
- dflt_features |= NETIF_F_TSO6;
+ tso_offloads |= NETIF_F_TSO6;
if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
VIRTCHNL2_CAP_SEG_IPV4_UDP |
VIRTCHNL2_CAP_SEG_IPV6_UDP))
- dflt_features |= NETIF_F_GSO_UDP_L4;
+ tso_offloads |= NETIF_F_GSO_UDP_L4;
if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
- offloads |= NETIF_F_GRO_HW;
- /* advertise to stack only if offloads for encapsulated packets is
- * supported
- */
- if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
- VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
- offloads |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_GRE_CSUM |
- NETIF_F_GSO_PARTIAL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_IPXIP4 |
- NETIF_F_GSO_IPXIP6 |
- 0;
-
- if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
- IDPF_CAP_TUNNEL_TX_CSUM))
- netdev->gso_partial_features |=
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- offloads |= NETIF_F_TSO_MANGLEID;
- }
+ other_offloads |= NETIF_F_GRO_HW;
if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
- offloads |= NETIF_F_LOOPBACK;
+ other_offloads |= NETIF_F_LOOPBACK;
- netdev->features |= dflt_features;
- netdev->hw_features |= dflt_features | offloads;
- netdev->hw_enc_features |= dflt_features | offloads;
+ netdev->features |= dflt_features | csum_offloads | tso_offloads;
+ netdev->hw_features |= netdev->features | other_offloads;
+ netdev->vlan_features |= netdev->features | other_offloads;
+ netdev->hw_enc_features |= dflt_features | other_offloads;
idpf_set_ethtool_ops(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -1131,11 +1114,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs) {
- kfree(vport);
+ if (!vport->q_vector_idxs)
+ goto free_vport;
- return NULL;
- }
idpf_vport_init(vport, max_q);
/* This alloc is done separate from the LUT because it's not strictly
@@ -1145,11 +1126,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
*/
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
- if (!rss_data->rss_key) {
- kfree(vport);
+ if (!rss_data->rss_key)
+ goto free_vector_idxs;
- return NULL;
- }
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
@@ -1162,6 +1141,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->next_vport = idpf_get_free_slot(adapter);
return vport;
+
+free_vector_idxs:
+ kfree(vport->q_vector_idxs);
+free_vport:
+ kfree(vport);
+
+ return NULL;
}
/**
@@ -2218,8 +2204,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ u16 max_tx_hdr_size = np->max_tx_hdr_size;
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -2242,7 +2228,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
goto unsupported;
len = skb_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
if (!skb->encapsulation)
@@ -2255,7 +2241,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
/* IPLEN can support at most 127 dwords */
len = skb_inner_network_header_len(skb);
- if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ if (unlikely(len > max_tx_hdr_size))
goto unsupported;
/* No need to validate L4LEN as TCP is the only protocol with a
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index bec4a02c5373..b35713036a54 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev)
{
struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+ cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->vc_event_task);
idpf_vc_core_deinit(adapter);
idpf_deinit_dflt_mbx(adapter);
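The fix follows the usual teardown rule: cancel every deferred worker before destroying the state it uses. cancel_delayed_work_sync() guarantees the work item is neither running nor pending when it returns, so the subsequent deinit calls cannot race with it. Annotated sketch of the ordering:

	/* 1. Stop asynchronous producers first */
	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->vc_event_task);

	/* 2. Only then tear down what those workers touch */
	idpf_vc_core_deinit(adapter);
	idpf_deinit_dflt_mbx(adapter);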
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 977741c41498..60b2e034c034 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -4031,6 +4031,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
+ /* Switch to poll mode in the tear-down path after sending the disable
+ * queues virtchnl message, as interrupts will be disabled after that.
+ */
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
+ return budget;
+
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
@@ -4041,15 +4049,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
else
idpf_vport_intr_set_wb_on_itr(q_vector);
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
- else
- return work_done;
+ return work_done;
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 612ed26a29c5..efc7b30e4211 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+ mutex_lock(&adapter->ptm_lock);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
if (!igc_is_crosststamp_supported(adapter))
break;
- mutex_lock(&adapter->ptm_lock);
wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
@@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
igc_ptm_reset(hw);
- mutex_unlock(&adapter->ptm_lock);
break;
default:
/* No work to do. */
@@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ mutex_unlock(&adapter->ptm_lock);
+
wrfl();
}
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 869815f48ac1..9eb47b4beb06 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -14,6 +14,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -24,8 +25,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
}
need_update = !!adapter->xdp_prog != !!prog;
- if (if_running && need_update)
- igc_close(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -36,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running && need_update)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
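The apparent intent is to swap the XDP program while quiescing only the datapath, instead of bouncing the whole interface through igc_close()/igc_open(). Teardown stops the rings before NAPI, and bring-up mirrors it in reverse; a condensed sketch:

	/* quiesce: stop packet flow, then the poller */
	igc_disable_rx_ring(adapter->rx_ring[i]);
	igc_disable_tx_ring(adapter->tx_ring[i]);
	napi_disable(&adapter->rx_ring[i]->q_vector->napi);

	old_prog = xchg(&adapter->xdp_prog, prog);	/* atomic swap */

	/* resume: poller first, then packet flow */
	napi_enable(&adapter->rx_ring[i]->q_vector->napi);
	igc_enable_tx_ring(adapter->tx_ring[i]);
	igc_enable_rx_ring(adapter->rx_ring[i]);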
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 467f81239e12..481f917f7ed2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3185,6 +3185,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
case ixgbe_aci_opc_get_link_status:
ixgbe_handle_link_status_event(adapter, &event);
break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 8d06ade3c7cd..617e07878e4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -171,6 +171,9 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_done_alt_write = 0x0904,
ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
/* debug commands */
ixgbe_aci_opc_debug_dump_internals = 0xFF08,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 0a679e95196f..24499bb36c00 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
- octep_stop(oct->netdev);
+ dev_close(oct->netdev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index 18c922dd5fc6..ccb69bc5c952 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_hold(netdev, NULL, GFP_ATOMIC);
- schedule_work(&oct->tx_timeout_task);
+ if (!schedule_work(&oct->tx_timeout_task))
+ netdev_put(netdev, NULL);
+
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
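This is the standard pairing rule for references taken on behalf of deferred work: schedule_work() returns false when the item was already pending, in which case only one execution (and therefore one netdev_put()) will ever happen, so the extra reference must be dropped immediately. The pattern in isolation:

	netdev_hold(netdev, NULL, GFP_ATOMIC);
	if (!schedule_work(&oct->tx_timeout_task))
		netdev_put(netdev, NULL);	/* already queued: drop our ref */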
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8216f843a7cd..971993586fb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
@@ -707,6 +717,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
+
+ /* pass lmac_id as 0 for the global CGX_CMR_RX_STAT9-12 counters */
+ if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+ lmac_id = 0;
+
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a383b5ef5b2d..60f085b00a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7fa98aeb3663..4a3370a40dd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -13,19 +13,26 @@
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
-#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
+#define LMT_MAX_VFS 256
+
+#define LMT_MAP_ENTRY_ENA BIT_ULL(20)
+#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16)
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
- u64 tbl_base;
+ u64 tbl_base, cfg;
+ int pfs, vfs;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ vfs = 1 << (cfg & 0xF);
+ pfs = 1 << ((cfg >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
@@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
+
+ cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1);
+ /* 2048 LMTLINES */
+ cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6);
+
+ writeq(cfg, (lmt_map_base + (index + 8)));
+
/* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Write 1 for flush and read is being used as a
* barrier and sets up a data dependency. Write to 0 after a write
@@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
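
The table sizing and per-function index arithmetic above can be checked in isolation. A minimal user-space sketch, assuming the APR_AF_LMT_CFG layout used in the hunk (2^cfg[3:0] VFs, 2^cfg[6:4] PFs) and the LMT_MAX_VFS/LMT_MAPTBL_ENTRY_SIZE values it defines:

#include <stdint.h>
#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16
#define LMT_MAX_VFS 256

/* Table size now derived from APR_AF_LMT_CFG instead of a fixed 128 KiB */
static uint64_t lmt_map_table_size(uint64_t cfg)
{
	uint64_t vfs = 1ull << (cfg & 0xF);
	uint64_t pfs = 1ull << ((cfg >> 4) & 0x7);

	return pfs * vfs * LMT_MAPTBL_ENTRY_SIZE;
}

/* Per-function byte offset, as in rvu_get_lmtst_tbl_index() */
static uint32_t lmt_tbl_index(uint32_t pf, uint32_t func)
{
	return (pf * LMT_MAX_VFS + func) * LMT_MAPTBL_ENTRY_SIZE;
}

int main(void)
{
	/* cfg = 0x58: 32 PFs x 256 VFs -> the old hardcoded 128 KiB */
	printf("size = %llu\n", (unsigned long long)lmt_map_table_size(0x58));
	printf("index(pf=2, func=3) = %u\n", lmt_tbl_index(2, 3));
	return 0;
}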
@@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & 0x1F;
+ pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a1f9ec03c2ce..c827da626471 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -553,6 +553,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
+ int apr_pfs, apr_vfs;
int buf_size = 10240;
size_t off = 0;
int index = 0;
@@ -568,8 +569,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+ val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
+ apr_vfs = 1 << (val & 0xF);
+ apr_pfs = 1 << ((val >> 4) & 0x7);
- lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs *
+ LMT_MAPTBL_ENTRY_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
kfree(buf);
@@ -591,7 +596,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
pf);
- index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE;
off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
(tbl_base + index));
lmt_addr = readq(lmt_map_base + index);
@@ -604,7 +609,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
/* Reading num of VFs per PF */
rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
for (vf = 0; vf < num_vfs; vf++) {
- index = (pf * rvu->hw->total_vfs * 16) +
+ index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) +
((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
off += scnprintf(&buf[off], buf_size - 1 - off,
"PF%d:VF%d \t\t", pf, vf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index cb6513ab35e7..69e0778f9ac1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a15cc86635d6..c3b6e0f60a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
index f3b9daffaec3..4c7e0f345cb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -531,7 +531,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
if (sw_tx_sc->encrypt)
sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
- policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU,
+ pfvf->netdev->mtu + OTX2_ETH_HLEN);
/* Write SecTag excluding AN bits(1..0) */
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 2b49bfec7869..92b0dba07853 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
{
@@ -549,10 +550,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -571,12 +575,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -584,7 +588,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -884,7 +889,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -1041,12 +1046,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -1055,8 +1061,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1275,9 +1293,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: no way to identify the xdp_buff here; XSK buffers are freed in otx2_free_aura_ptr() */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1292,6 +1311,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1306,16 +1326,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1332,7 +1357,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1419,6 +1445,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1462,21 +1489,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp) {
+ netdev_err(pfvf->netdev, "Allocation of xdp_buff array failed\n");
+ return -ENOMEM;
+ }
}
return 0;
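
The branch above picks a buffer backend per RX queue: a page pool unless the queue's bit is set in af_xdp_zc_qidx, in which case the XSK pool registered for that qid is used. A hedged stand-alone sketch of that selection (names illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: one bit per RX queue, set when an AF_XDP zero-copy
 * pool is bound to that queue (see otx2_xsk_pool_enable further below).
 */
static bool queue_uses_xsk_zc(const uint64_t *af_xdp_zc_qidx, unsigned int qidx)
{
	return af_xdp_zc_qidx[qidx / 64] & (1ull << (qidx % 64));
}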
@@ -1537,9 +1578,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1589,11 +1639,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 65814e3dc93f..7477038d29e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
@@ -349,6 +350,7 @@ struct otx2_flow_config {
struct list_head flow_list_tc;
u8 ucast_flt_cnt;
bool ntuple;
+ u16 ntuple_cnt;
};
struct dev_hw_ops {
@@ -531,6 +533,8 @@ struct otx2_nic {
/* Inline ipsec */
struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -1002,7 +1006,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
@@ -1032,6 +1036,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1094,7 +1100,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 33ec9a7f7c03..e13ae5484c19 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -41,6 +41,7 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
if (!pfvf->flow_cfg)
return 0;
+ pfvf->flow_cfg->ntuple_cnt = ctx->val.vu16;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..b3f616a7f2e9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -315,7 +315,7 @@ static void otx2_get_pauseparam(struct net_device *netdev,
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_pause_frm_cfg *req, *rsp;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return;
mutex_lock(&pfvf->mbox.lock);
@@ -347,7 +347,7 @@ static int otx2_set_pauseparam(struct net_device *netdev,
if (pause->autoneg)
return -EOPNOTSUPP;
- if (is_otx2_lbkvf(pfvf->pdev))
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return -EOPNOTSUPP;
if (pause->rx_pause)
@@ -937,8 +937,8 @@ static u32 otx2_get_link(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- /* LBK link is internal and always UP */
- if (is_otx2_lbkvf(pfvf->pdev))
+ /* LBK and SDP links are internal and always UP */
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev))
return 1;
return pfvf->linfo.link_up;
}
@@ -1409,7 +1409,7 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- if (is_otx2_lbkvf(pfvf->pdev)) {
+ if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) {
cmd->base.duplex = DUPLEX_FULL;
cmd->base.speed = SPEED_100000;
} else {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 47bfd1fb37d4..64c6d9162ef6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -247,7 +247,7 @@ int otx2_mcam_entry_init(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, flow_cfg->ntuple_cnt);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
@@ -307,6 +307,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+ pf->flow_cfg->ntuple_cnt = OTX2_DEFAULT_FLOWCOUNT;
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1dde93e8af8..09a51970851f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -27,6 +27,7 @@
#include "qos.h"
#include <rvu_trace.h>
#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
@@ -2691,7 +2683,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2701,11 +2692,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2789,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2866,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
@@ -3204,16 +3198,28 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleanup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleanup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 224cef938927..00b6903ba250 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -96,20 +97,16 @@ static unsigned int frag_num(unsigned int i)
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
-
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -527,9 +524,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -554,6 +552,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -566,20 +565,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Request a wake-up since buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear wake-up, since buffers were refilled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -1230,15 +1240,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1359,8 +1373,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
+ struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1377,9 +1392,12 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
@@ -1405,7 +1423,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1418,14 +1436,28 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1438,37 +1470,57 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf, cqe->sg.seg_addr,
+ cqe->sg.seg_size, qidx, XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
+ if (xsk_buff)
+ xsk_buff_free(xsk_buff);
trace_xdp_exception(pfvf->netdev, prog, act);
break;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index d23810963fdb..8f346fbc8221 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -127,7 +129,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -144,6 +150,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e926c6ce96cf..9b28be4c4a5d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -722,15 +722,30 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
- err = otx2_dcbnl_set_ops(netdev);
- if (err)
- goto err_shutdown_tc;
+ /* Priority flow control is not supported for LBK and SDP VFs */
+ if (!(is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev))) {
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_free_zc_bmap;
+ }
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..894c1e0aea6f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->rq.ena = 0;
+ rq_aq->rq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set, the queues are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi))
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+
+ return 0;
+}
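
From user space, the XDP_SETUP_XSK_POOL and ndo_xsk_wakeup paths added above are exercised when an AF_XDP socket binds to a queue in zero-copy mode. A hedged sketch of just the bind step; a real program must first register a UMEM and map the rings (XDP_UMEM_REG etc.), which is omitted here:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <sys/socket.h>
#include <unistd.h>

static int xsk_bind_zc(const char *ifname, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp = { 0 };
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_ZEROCOPY;	/* request the zero-copy path */

	/* Fails without a registered UMEM; shown only to locate the hook */
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}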
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..022b3433edbb
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..c5dbae0e513b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 0cd1ecacfd29..22a8b909dd80 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = {
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
- "top_usxgmii0_sel",
- "top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
- "top_xfi_phy0_xtal_sel",
- "top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
@@ -2206,14 +2202,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+ addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+ rxd->rxd2);
+ else
+ addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
- likely(dma_addr != DMA_MAPPING_ERROR))
- rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;
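
On 36-bit DMA SoCs the hunk above must re-derive the high address bits from the old descriptor when the mapping failed, because rxd2 is about to be rewritten with the length field. A hedged sketch of the pack/unpack, with the bit position assumed purely for illustration (the real mask is RX_DMA_ADDR64_MASK in mtk_eth_soc.h):

#include <stdint.h>

#define ADDR64_SHIFT 28	/* assumed position of the high-address nibble */

static uint32_t rx_dma_prep_addr64(uint64_t dma)
{
	return (uint32_t)((dma >> 32) & 0xF) << ADDR64_SHIFT;
}

static uint64_t rx_dma_get_addr64(uint32_t rxd2)
{
	return (uint64_t)((rxd2 >> ADDR64_SHIFT) & 0xF) << 32;
}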
@@ -3140,11 +3140,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
- int i;
+ int i, j, txqs = 1;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ for (j = 0; j < txqs; j++)
+ netdev_tx_reset_subqueue(eth->netdev[i], j);
+ }
- for (i = 0; i < MTK_MAX_DEVS; i++)
- if (eth->netdev[i])
- netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->tx.desc_size,
@@ -3419,9 +3427,6 @@ static int mtk_open(struct net_device *dev)
}
mtk_gdm_config(eth, target_mac->id, gdm_config);
}
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
@@ -3997,11 +4002,27 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
- /* PSE should not drop port1, port8 and port9 packets */
- mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+ /* PSE dummy page mechanism */
+ mtk_w32(eth, PSE_DUMMY_WORK_GDM(1) | PSE_DUMMY_WORK_GDM(2) |
+ PSE_DUMMY_WORK_GDM(3) | DUMMY_PAGE_THR, PSE_DUMY_REQ);
+
+ /* PSE free buffer drop threshold */
+ mtk_w32(eth, 0x00600009, PSE_IQ_REV(8));
+
+ /* PSE should not drop port8, port9 and port13 packets from
+ * WDMA Tx
+ */
+ mtk_w32(eth, 0x00002300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port8, port9 and port13 on WDMA Rx
+ * ring full
+ */
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(0));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(1));
+ mtk_w32(eth, 0x00002300, PSE_PPE_DROP(2));
/* GDM and CDM Threshold */
- mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x08000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
@@ -4018,7 +4039,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
- mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+ mtk_w32(eth, 0x00000300, PSE_PPE_DROP(0));
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
@@ -4662,7 +4683,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
- MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 8d7b6818d860..0570623e569d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -151,7 +151,15 @@
#define PSE_FQFC_CFG1 0x100
#define PSE_FQFC_CFG2 0x104
#define PSE_DROP_CFG 0x108
-#define PSE_PPE0_DROP 0x110
+#define PSE_PPE_DROP(x) (0x110 + ((x) * 0x4))
+
+/* PSE Last FreeQ Page Request Control */
+#define PSE_DUMY_REQ 0x10C
+/* PSE_DUMY_REQ is not a typo; the register is named this way in
+ * MediaTek's datasheet as well.
+ */
+#define PSE_DUMMY_WORK_GDM(x) BIT(16 + (x))
+#define DUMMY_PAGE_THR 0x1
/* PSE Input Queue Reservation Register */
#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
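
The replace above generalizes the single VLAN tag to a two-entry stack shared with PPPoE. A minimal stand-alone mirror of the capacity check and push (names illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

struct vlan_stack {
	struct { uint16_t id; uint16_t proto; } vlans[2];
	uint8_t num;
};

static bool vlan_stack_push(struct vlan_stack *s, uint8_t pppoe_num,
			    uint16_t id, uint16_t proto)
{
	/* at most two tags total across VLAN + PPPoE, 802.1Q only */
	if (s->num + pppoe_num == 2 || proto != 0x8100 /* ETH_P_8021Q */)
		return false;	/* mirrors the -EOPNOTSUPP check above */

	s->vlans[s->num].id = id;
	s->vlans[s->num].proto = proto;
	s->num++;
	return true;
}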
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..c2ab87828d85 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
+ unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
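
The irqsave conversion above suggests priv->lock can also be taken from hard-IRQ context, in which case NAPI-side users must disable local interrupts while holding it or risk a same-CPU deadlock. The generic kernel pattern, as a sketch:

#include <linux/spinlock.h>

static void irq_safe_section(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* touch state shared with the interrupt handler */
	spin_unlock_irqrestore(lock, flags);
}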
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- spin_lock(&priv->lock);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index b330020dc0d6..f2bded847e61 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -712,7 +712,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..6e077d202827 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -450,6 +450,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 979fc56205e1..769e683f2488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -95,8 +95,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -386,7 +384,6 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 31eb99f09c63..58ec5e44aa7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,9 @@
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -1242,7 +1249,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..bd5877acc5b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..dbd9482359e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
@@ -177,6 +176,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
priv = ptpsq->txqsq.priv;
+ rtnl_lock();
mutex_lock(&priv->state_lock);
chs = &priv->channels;
netdev = priv->netdev;
@@ -184,22 +184,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx)
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
- rtnl_lock();
mlx5e_deactivate_priv_channels(priv);
- rtnl_unlock();
mlx5e_ptp_close(chs->ptp);
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
- rtnl_lock();
mlx5e_activate_priv_channels(priv);
- rtnl_unlock();
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
return err;
}
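
The recover path above now takes rtnl_lock once, outside state_lock, instead of toggling it around channel deactivation and activation. A compressed sketch of the resulting lock ordering (struct and field names illustrative):

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

struct dev_priv {
	struct mutex state_lock;
};

static void recover_sketch(struct dev_priv *priv)
{
	rtnl_lock();
	mutex_lock(&priv->state_lock);
	/* deactivate channels, close and reopen the PTP SQ, reactivate */
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}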
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..b9cf79e27124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
struct flow_match_enc_keyid enc_keyid;
void *misc_c, *misc_v;
- misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
return 0;
@@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f);
if (err)
return err;
+
+ /* We can't mix custom tunnel headers with symbolic ones and we
+ * don't have a symbolic field name for GBP, so we use custom
+ * tunnel headers in this case. We need hardware support to
+ * match on custom tunnel headers, but we already know it's
+ * supported because the previous call successfully checked for
+ * that.
+ */
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_5);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_5);
+
+ /* Shift by 8 to account for the reserved bits in the vxlan
+ * header after the VNI.
+ */
+ MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1,
+ be32_to_cpu(enc_keyid.mask->keyid) << 8);
+ MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1,
+ be32_to_cpu(enc_keyid.key->keyid) << 8);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ return 0;
}
/* match on VNI is required */
@@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
be32_to_cpu(enc_keyid.mask->keyid));
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 94b291662087..7a6cc0f4002e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -546,6 +546,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +613,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e7b64679f121..3cf44fbdf5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -165,6 +165,25 @@ static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
#endif
}
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
+}
+
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5e_ipsec_rx *rx)
{
@@ -200,11 +219,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -281,10 +297,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8fcaee381b0e..ca56740f6d2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -359,7 +359,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
return 0;
err_nomem:
- kvfree(shampo->bitmap);
+ bitmap_free(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
@@ -367,7 +367,7 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
- kvfree(rq->mpwqe.shampo->bitmap);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->pages);
}
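
Both hunks keep the free paired with the allocator: the SHAMPO bitmap comes from bitmap_zalloc(), so its release belongs to bitmap_free(). A minimal sketch of the pairing (function name hypothetical):

    static int example_bitmap_user(unsigned int nbits)
    {
            unsigned long *bitmap;

            bitmap = bitmap_zalloc(nbits, GFP_KERNEL);  /* sized in bits */
            if (!bitmap)
                    return -ENOMEM;

            /* ... use the bitmap ... */

            bitmap_free(bitmap);    /* pairs with bitmap_zalloc(), not kvfree() */
            return 0;
    }
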
@@ -2023,41 +2023,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -3816,8 +3787,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB.
*/
- if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
- return -EINVAL;
+ if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "MQPRIO cannot be configured when HTB offload is enabled.");
+ return -EOPNOTSUPP;
+ }
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
@@ -4373,6 +4347,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_MACSEC;
+ if (netdev->features & NETIF_F_HW_MACSEC)
+ netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n");
+
return features;
}
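
The mqprio hunk above swaps a kernel WARN_ON() splat for a user-visible netlink error. A hedged sketch of the reporting path (wrapper name hypothetical): NL_SET_ERR_MSG_MOD() prefixes the module name, and the string travels back to user space together with the -EOPNOTSUPP on the same netlink transaction:

    static int example_check_htb_conflict(struct mlx5e_priv *priv,
                                          struct netlink_ext_ack *extack)
    {
            /* HTB offload and MQPRIO are mutually exclusive root qdiscs */
            if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
                    NL_SET_ERR_MSG_MOD(extack,
                                       "MQPRIO cannot be configured when HTB offload is enabled.");
                    return -EOPNOTSUPP;
            }

            return 0;
    }
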
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fdff9fd8a89e..07f38f472a27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -65,6 +65,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -855,6 +856,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink),
* RTNL lock must be held before triggering netdev notifiers.
@@ -886,6 +889,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1d60465cc2ca..2f7a543feca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba99609999f..f1d908f61134 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
!list_is_first(&attr->list, &flow->attrs))
return 0;
- if (flow_flag_test(flow, SLOW))
- return 0;
-
esw_attr = attr->esw_attr;
if (!esw_attr->split_count ||
esw_attr->split_count == esw_attr->out_count - 1)
@@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
/* external dest with encap is considered as internal by firmware */
if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK &&
- !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID))
+ !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
ext_dest = true;
else
int_dest = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index ed977ae75fab..4bba2884c1c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..76382626ad41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 823c1ba456cd..803bacf2a95e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -305,8 +305,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -323,6 +324,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -396,7 +399,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -463,7 +467,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 20cc01ceee8a..2e0920199d47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3532,7 +3532,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
- mlx5_rdma_enable_roce(esw->dev);
+ err = mlx5_rdma_enable_roce(esw->dev);
+ if (err)
+ goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3593,6 +3595,7 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
+err_roce:
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
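
mlx5_rdma_enable_roce() can now fail, so the function gains a matching unwind label. A condensed sketch of the goto-based cleanup discipline this relies on (intermediate init steps elided):

    static int example_enable(struct mlx5_eswitch *esw)
    {
            int err;

            mutex_init(&esw->offloads.termtbl_mutex);

            err = mlx5_rdma_enable_roce(esw->dev);
            if (err)
                    goto err_roce;          /* mutex already initialized */

            /* ... further init; each failure jumps to the label that
             * undoes everything done so far, in reverse order ...
             */

            return 0;

    err_roce:
            mutex_destroy(&esw->offloads.termtbl_mutex);
            return err;
    }
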
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..fc6e56305cbb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -163,11 +163,16 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* Bits 1-63 are not supported for NICs,
+ * hence read only bit 0 (ASIC) from the LSB.
+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit())
+ mlx5_core_warn(events->dev,
+ "High temperature on sensors with bit set %llx %llx",
+ value_msb, value_lsb);
return NOTIFY_OK;
}
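
A minimal sketch of the guard added above: the LSB is masked down to the only sensor bit NICs implement, and net_ratelimit() returns false once the default burst is exhausted, so a misbehaving sensor cannot flood the kernel log (helper name hypothetical):

    static void example_temp_warn(struct mlx5_core_dev *dev, u64 msb, u64 lsb)
    {
            /* NICs only implement sensor bit 0 (ASIC) in the LSB word */
            lsb &= 0x1;

            /* Gate the print once the default ratelimit burst is spent */
            if (net_ratelimit())
                    mlx5_core_warn(dev,
                                   "High temperature on sensors with bit set %llx %llx",
                                   msb, lsb);
    }
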
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..52c8035547be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -799,6 +799,7 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 711d14dea248..d313cb7f0ed8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..43b2216bc0a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -618,10 +618,6 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -635,7 +631,16 @@ struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&inner_ttc_groups[TTC_GROUPS_DEFAULT];
@@ -691,10 +696,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool use_l4_type;
int err;
- ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
- if (!ttc)
- return ERR_PTR(-ENOMEM);
-
switch (params->ns_type) {
case MLX5_FLOW_NAMESPACE_PORT_SEL:
use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
@@ -708,7 +709,16 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EINVAL);
}
+ ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
+ if (!ttc)
+ return ERR_PTR(-ENOMEM);
+
ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ if (!ns) {
+ kvfree(ttc);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
&ttc_groups[TTC_GROUPS_DEFAULT];
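
Both fs_ttc.c hunks apply the same two fixes: validate inputs before allocating so the early return cannot leak ttc, and treat a missing flow namespace as an error instead of dereferencing NULL later. A hedged sketch of the ordering (the validation helper is hypothetical, standing in for the ns_type switch):

    static struct mlx5_ttc_table *example_create(struct mlx5_core_dev *dev,
                                                 struct ttc_params *params)
    {
            struct mlx5_flow_namespace *ns;
            struct mlx5_ttc_table *ttc;

            /* Validate before allocating: this early return leaks nothing */
            if (!example_ns_type_ok(params->ns_type))       /* hypothetical */
                    return ERR_PTR(-EINVAL);

            ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
            if (!ttc)
                    return ERR_PTR(-ENOMEM);

            /* Every failure past this point must undo the allocation */
            ns = mlx5_get_flow_namespace(dev, params->ns_type);
            if (!ns) {
                    kvfree(ttc);
                    return ERR_PTR(-EOPNOTSUPP);
            }

            /* ... create groups and rules ... */
            return ttc;
    }
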
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index a42f6cd99b74..5c552b71e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
+ u8 mac[ETH_ALEN] = {};
union ib_gid gid;
- u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
- return;
+ return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
- return;
+ return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
- return;
+ return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
+ return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
index 750cff2a71a4..3d9e76c3d42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7d6d859cef3f..511cd92e0e3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3014,6 +3014,9 @@ static int mlxsw_sp_neigh_rif_made_sync(struct mlxsw_sp *mlxsw_sp,
.rif = rif,
};
+ if (!mlxsw_sp_dev_lower_is_port(mlxsw_sp_rif_dev(rif)))
+ return 0;
+
neigh_for_each(&arp_tbl, mlxsw_sp_neigh_rif_made_sync_each, &rms);
if (rms.err)
goto err_arp;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 02bb81b3c506..bf1655edeed2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -785,8 +785,10 @@ enum {
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index bbc7c1c0c37e..cdd6c3a21094 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ /* Write the upper 32b and then the lower 32b. This way the FW
+ * can read lower, upper, lower and verify that the state of the
+ * descriptor wasn't changed mid-transaction.
+ */
fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
fw_wrfl(fbd);
fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
+static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u32 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ /* For initialization we write the lower 32b of the descriptor first.
+ * This way we can set the state to mark it invalid before we clear the
+ * upper 32b.
+ */
+ fw_wr32(fbd, desc_offset, desc);
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset + 1, 0);
+}
+
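
The two helpers above implement a torn-read guard over a 64-bit descriptor exposed as two 32-bit words. A sketch of the read-back their ordering comments are written for: sample lower, upper, lower and retry when the two lower samples differ, so an update that follows the upper-then-lower write order is never observed half-written (fw_rd32() assumed to mirror the fw_wr32() accessor):

    static u64 example_read_desc_stable(struct fbnic_dev *fbd, u32 desc_offset)
    {
            u32 lo, hi, lo2;

            do {
                    lo  = fw_rd32(fbd, desc_offset);        /* lower 32b */
                    hi  = fw_rd32(fbd, desc_offset + 1);    /* upper 32b */
                    lo2 = fw_rd32(fbd, desc_offset);        /* re-check  */
            } while (lo != lo2);    /* writer touches lower last */

            return ((u64)hi << 32) | lo;
    }
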
static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
{
u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
@@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
return desc;
}
-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int desc_idx;
+ /* Disable DMA transactions from the device and flush any
+ * transactions triggered during cleaning.
+ */
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
+ break;
+ }
+
+ wrfl(fbd);
+
/* Initialize first descriptor to all 0s. Doing this gives us a
* solid stop for the firmware to hit when it is done looping
* through the ring.
*/
- __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
-
- fw_wrfl(fbd);
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
/* We then fill the rest of the ring starting at the end and moving
* back toward descriptor 0 with skip descriptors that have neither
* length nor address, telling the firmware that it can skip them
* and just move past them to the one we initialized to 0.
*/
- for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
- __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
- FBNIC_IPC_MBX_DESC_FW_CMPL |
- FBNIC_IPC_MBX_DESC_HOST_CMPL);
- fw_wrfl(fbd);
- }
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
+ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
void fbnic_mbx_init(struct fbnic_dev *fbd)
@@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_init_desc_ring(fbd, i);
+ fbnic_mbx_reset_desc_ring(fbd, i);
}
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
@@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
int i;
- fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
@@ -322,67 +352,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
return err;
}
-/**
- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
- * @fbd: FBNIC device structure
- *
- * Return: NULL on failure to allocate, error pointer on error, or pointer
- * to new TLV test message.
- *
- * Sends a single TLV header indicating the host wants the firmware to
- * confirm the capabilities and version.
- **/
-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
-{
- int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
-
- /* Return 0 if we are not calling this on ASIC */
- return (err == -EOPNOTSUPP) ? 0 : err;
-}
-
-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
- /* This is a one time init, so just exit if it is completed */
- if (mbx->ready)
- return;
-
mbx->ready = true;
switch (mbx_idx) {
case FBNIC_IPC_MBX_RX_IDX:
+ /* Enable DMA writes from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+
/* Make sure we have a page for the FW to write to */
fbnic_mbx_alloc_rx_msgs(fbd);
break;
case FBNIC_IPC_MBX_TX_IDX:
- /* Force version to 1 if we successfully requested an update
- * from the firmware. This should be overwritten once we get
- * the actual version from the firmware in the capabilities
- * request message.
- */
- if (!fbnic_fw_xmit_cap_msg(fbd) &&
- !fbd->fw_cap.running.mgmt.version)
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
break;
}
}
-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+static bool fbnic_mbx_event(struct fbnic_dev *fbd)
{
- int i;
-
- /* We only need to do this on the first interrupt following init.
+ /* We only need to do this on the first interrupt following reset.
* This primes the mailbox so that we will have cleared all the
* skip descriptors.
*/
if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
- return;
+ return false;
wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
- fbnic_mbx_postinit_desc_ring(fbd, i);
+ return true;
}
/**
@@ -743,9 +747,9 @@ free_message:
}
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
FBNIC_TLV_ATTR_LAST
};
@@ -762,21 +766,21 @@ static int fbnic_fw_parse_tsene_read_resp(void *opaque,
if (!cmpl_data)
return -EINVAL;
- if (results[FBNIC_TSENE_ERROR]) {
- err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
+ if (results[FBNIC_FW_TSENE_ERROR]) {
+ err = fbnic_tlv_attr_get_unsigned(results[FBNIC_FW_TSENE_ERROR]);
if (err)
goto exit_complete;
}
- if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
err = -EINVAL;
goto exit_complete;
}
cmpl_data->u.tsene.millidegrees =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ fbnic_tlv_attr_get_signed(results[FBNIC_FW_TSENE_THERM]);
cmpl_data->u.tsene.millivolts =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+ fbnic_tlv_attr_get_signed(results[FBNIC_FW_TSENE_VOLT]);
exit_complete:
cmpl_data->result = err;
@@ -864,7 +868,7 @@ next_page:
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
- fbnic_mbx_postinit(fbd);
+ fbnic_mbx_event(fbd);
fbnic_mbx_process_tx_msgs(fbd);
fbnic_mbx_process_rx_msgs(fbd);
@@ -872,60 +876,97 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
- struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ int err, i;
- /* Immediate fail if BAR4 isn't there */
- if (!fbnic_fw_present(fbd))
- return -ENODEV;
+ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
- tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- while (!tx_mbx->ready && --attempts) {
/* Force the firmware to trigger an interrupt response to
* avoid the mailbox getting stuck closed if the interrupt
* is reset.
*/
- fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
- msleep(200);
+ /* Immediate fail if BAR4 went away */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
- fbnic_mbx_poll(fbd);
- }
+ msleep(20);
+ } while (!fbnic_mbx_event(fbd));
+
+ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+
+ /* Request an update from the firmware. This should overwrite
+ * mgmt.version once we get the actual version from the firmware
+ * in the capabilities request message.
+ */
+ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ if (err)
+ goto clean_mbx;
+
+ /* Use "1" to indicate we entered the state waiting for a response */
+ fbd->fw_cap.running.mgmt.version = 1;
+
+ return 0;
+clean_mbx:
+ /* Cleanup Rx buffers and disable mailbox */
+ fbnic_mbx_clean(fbd);
+ return err;
+}
+
+static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
+{
+ cmpl_data->result = -EPIPE;
+ complete(&cmpl_data->done);
+}
- return attempts ? 0 : -ETIMEDOUT;
+static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
+{
+ if (fbd->cmpl_data) {
+ __fbnic_fw_evict_cmpl(fbd->cmpl_data);
+ fbd->cmpl_data = NULL;
+ }
}
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
+ unsigned long timeout = jiffies + 10 * HZ + 1;
struct fbnic_fw_mbx *tx_mbx;
- int attempts = 50;
- u8 count = 0;
-
- /* Nothing to do if there is no mailbox */
- if (!fbnic_fw_present(fbd))
- return;
+ u8 tail;
/* Record the current Tx mailbox state */
tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
- /* Nothing to do if mailbox never got to ready */
- if (!tx_mbx->ready)
- return;
+ spin_lock_irq(&fbd->fw_tx_lock);
+
+ /* Clear ready to prevent any further attempts to transmit */
+ tx_mbx->ready = false;
+
+ /* Read tail to determine the last tail state for the ring */
+ tail = tx_mbx->tail;
+
+ /* Flush any completions as we are no longer processing Rx */
+ fbnic_mbx_evict_all_cmpl(fbd);
+
+ spin_unlock_irq(&fbd->fw_tx_lock);
/* Give the firmware time to process the packet;
- * we will wait up to 10 seconds which is 50 waits of 200ms.
+ * we will wait up to 10 seconds which is 500 waits of 20ms.
*/
do {
u8 head = tx_mbx->head;
- if (head == tx_mbx->tail)
+ /* Tx ring is empty once head == tail */
+ if (head == tail)
break;
- msleep(200);
+ msleep(20);
fbnic_mbx_process_tx_msgs(fbd);
-
- count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
- } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+ } while (time_is_after_jiffies(timeout));
}
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
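
Both reworked waits above share the same jiffies-based poll loop; the `+ 1` pads the deadline by one tick so rounding of the first jiffy cannot shorten the 10 s budget. A minimal sketch (the progress condition is hypothetical):

    unsigned long timeout = jiffies + 10 * HZ + 1;  /* +1 tick: don't round
                                                     * the budget down */
    do {
            if (!time_is_after_jiffies(timeout))
                    return -ETIMEDOUT;              /* 10 s budget exhausted */

            msleep(20);                             /* 20 ms poll interval */
    } while (!example_fw_responded());              /* hypothetical condition */
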
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fe68333d51b1..a3618e7826c2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -139,10 +139,10 @@ enum {
};
enum {
- FBNIC_TSENE_THERM = 0x0,
- FBNIC_TSENE_VOLT = 0x1,
- FBNIC_TSENE_ERROR = 0x2,
- FBNIC_TSENE_MSG_MAX
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
};
enum {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 14291401f463..dde4a37116e2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
-
- /* Enable XALI AR/AW outbound */
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
- FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
- FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
}
static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 7a96b6ee773f..1db57c42333e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -628,6 +628,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbnic_rss_key_fill(fbn->rss_key);
fbnic_rss_init_en_mask(fbn);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
netdev->features |=
NETIF_F_RXHASH |
NETIF_F_SG |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 23760b613d3e..a70b88037a20 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
+ tx->frame_last = 0;
return -ENOMEM;
}
@@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ tx->frame_last = tx->frame_tail;
}
- tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- buffer_info = &tx->buffer_info[tx->frame_tail];
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+ buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
+ tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -3491,6 +3495,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
struct pci_dev *pdev)
{
struct lan743x_tx *tx;
+ u32 sgmii_ctl;
int index;
int ret;
@@ -3503,6 +3508,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
spin_lock_init(&adapter->eth_syslock_spinlock);
mutex_init(&adapter->sgmii_rw_lock);
pci11x1x_set_rfe_rd_fifo_threshold(adapter);
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ } else {
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ }
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
} else {
adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
@@ -3554,7 +3568,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
- u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
@@ -3566,10 +3579,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
adapter->mdiobus->priv = (void *)adapter;
if (adapter->is_pci11x1x) {
if (adapter->is_sgmii_en) {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
adapter->mdiobus->read = lan743x_mdiobus_read_c22;
@@ -3580,10 +3589,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
} else {
- sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
- sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
- sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
- lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 7f73d66854be..db5fc73e41cc 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -980,6 +980,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
+ u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 638ef64d639f..f412e17b0d50 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1047,7 +1047,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ef93df520887..08bee56aea35 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
ocelot_bridge_vlan_find(ocelot, vid));
if (err)
return err;
+ } else if (ocelot_port->pvid_vlan &&
+ ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
}
/* Untagged egress VLAN classification */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 99df00c30b8c..b5d744d2586f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -203,7 +203,7 @@ static struct pci_driver qede_pci_driver = {
};
static struct qed_eth_cb_ops qede_ll_ops = {
- {
+ .common = {
#ifdef CONFIG_RFS_ACCEL
.arfs_filter_op = qede_arfs_filter_op,
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 28d24d59efb8..d57b976b9040 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1484,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o
}
cmd_op = (cmd.rsp.arg[0] & 0xff);
- if (cmd.rsp.arg[0] >> 25 == 2)
- return 2;
+ if (cmd.rsp.arg[0] >> 25 == 2) {
+ ret = 2;
+ goto out;
+ }
+
if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
set_bit(QLC_BC_VF_STATE, &vf->state);
else
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5a5eba49c651..267105ba9274 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -89,6 +89,7 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct {
const char *name;
@@ -2850,6 +2851,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
+ rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+}
+
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -3822,6 +3849,7 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -5222,6 +5250,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
new_bus->irq[0] = PHY_MAC_INTERRUPT;
+ new_bus->phy_mask = GENMASK(31, 1);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
pci_domain_nr(pdev->bus), pci_dev_id(pdev));
@@ -5326,6 +5355,9 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
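
rtl_disable_zrxdc_timeout() above is a read-modify-write on extended config space with a graceful fallback; a condensed sketch of the pattern, reusing the offset and bit the patch defines:

    static void example_clear_zrxdc(struct rtl8169_private *tp)
    {
            struct pci_dev *pdev = tp->pci_dev;
            u8 val;

            /* Prefer native config space; both accessors return PCIBIOS_* */
            if (pdev->cfg_size > RTL_GEN3_RELATED_OFF &&
                pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val) ==
                PCIBIOS_SUCCESSFUL) {
                    val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
                    if (pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
                                              val) == PCIBIOS_SUCCESSFUL)
                            return;
            }

            /* Fall back to the vendor CSI window when config space fails */
            rtl_csi_write(tp, RTL_GEN3_RELATED_OFF,
                          rtl_csi_read(tp, RTL_GEN3_RELATED_OFF) &
                          ~RTL_GEN3_ZRXDC_NONCOMPL);
    }
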
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 2aacc1996796..55b8d3666153 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1925,8 +1925,8 @@ static u16 rtase_calc_time_mitigation(u32 time_us)
time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME);
- msb = fls(time_us);
- if (msb >= RTASE_MITI_COUNT_BIT_NUM) {
+ if (time_us > RTASE_MITI_TIME_COUNT_MASK) {
+ msb = fls(time_us);
time_unit = msb - RTASE_MITI_COUNT_BIT_NUM;
time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM);
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e25db747a81a..c660eb933f24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -101,8 +101,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index ab7c2750c104..702ea5a00b56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -590,6 +590,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a4dc89e23a68..a33be23121b3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -1254,6 +1255,7 @@ static const struct rk_gmac_ops rk3576_ops = {
.set_rgmii_speed = rk3576_set_gmac_speed,
.set_rmii_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1401,6 +1403,7 @@ static const struct rk_gmac_ops rk3588_ops = {
.set_rgmii_speed = rk3588_set_gmac_speed,
.set_rmii_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1812,8 +1815,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
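
A minimal sketch of the lookup-and-bail pattern the hunk adds, assuming a probe-path caller: dev_err_probe() logs the failure (or quietly records the reason for -EPROBE_DEFER), and ERR_CAST() re-types the regmap error pointer for a function that returns a different pointer type:

    struct regmap *grf;

    grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
    if (IS_ERR(grf)) {
            /* Logs (or records the deferral reason) and keeps the errno */
            dev_err_probe(dev, PTR_ERR(grf), "failed to lookup rockchip,grf\n");
            return ERR_CAST(grf);
    }
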
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..ece8588b3b17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev)
dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..9d31fa5bbe15 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -964,7 +964,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
* address. No need to mask it again.
*/
- reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ reg |= ret << H3_EPHY_ADDR_SHIFT;
} else {
/* For SoCs without internal PHY the PHY selection bit should be
* set to 0 (external PHY).
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..2d5bf1de5d2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -331,8 +331,8 @@ enum rtc_control {
/* PTP and timestamping registers */
-#define GMAC3_X_ATSNS GENMASK(19, 16)
-#define GMAC3_X_ATSNS_SHIFT 16
+#define GMAC3_X_ATSNS GENMASK(29, 25)
+#define GMAC3_X_ATSNS_SHIFT 25
#define GMAC_PTP_TCR_ATSFC BIT(24)
#define GMAC_PTP_TCR_ATSEN0 BIT(25)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..11c525b8d269 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -560,7 +560,7 @@ void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + GMAC_PTP_ATNR);
- ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
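
This hunk and the identical one in stmmac_hwtstamp.c below fix the same integer-promotion bug: readl() returns u32, and on 32-bit builds (where NSEC_PER_SEC is a 32-bit long) the multiply was performed in 32 bits before the add, wrapping once the seconds register exceeded four. A sketch:

    u64 ns;

    ns  = readl(ptpaddr + GMAC_PTP_ATNR);   /* nanoseconds part */
    /* Widen BEFORE multiplying: without the cast the product is
     * computed in 32 bits and silently truncated modulo 2^32.
     */
    ns += (u64)readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC;
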
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f05cae103d83..dae279ee2c28 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -257,7 +257,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
@@ -265,8 +265,7 @@ struct stmmac_priv {
int sph;
int sph_cap;
u32 sarc_type;
-
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -343,7 +342,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 0f59aa982604..e2840fa241f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -222,7 +222,7 @@ static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
u64 ns;
ns = readl(ptpaddr + PTP_ATNR);
- ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+ ns += (u64)readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
*ptp_time = ns;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b7c3bfdaa180..b9340f8bd182 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3448,9 +3448,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3458,6 +3467,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3568,7 +3578,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -5853,6 +5865,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5985,7 +6000,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6269,7 +6286,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6625,6 +6644,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6656,6 +6678,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -7813,13 +7838,11 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
@@ -7909,16 +7932,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks, which are
+ * necessary for the MAC DMA reset to complete, are running.
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7928,14 +7947,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
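
The reshuffled resume path above encodes an ordering constraint; a condensed outline of the sequence the patch establishes (locking shown, error handling and queue restoration elided):

    rtnl_lock();

    /* 1. PHY clocks must be running before the MAC DMA reset */
    phylink_prepare_resume(priv->phylink);

    /* 2. Reset and reprogram the MAC while the link is still down */
    stmmac_hw_setup(ndev, false);

    /* 3. Only now let phylink bring the link up: it may do so from a
     *    workqueue thread and would race an unfinished setup
     */
    phylink_resume(priv->phylink);

    /* 4. Undo the suspend-time speed_down when not using PMT wake-up */
    if (device_may_wakeup(priv->device) && !priv->plat->pmt)
            phylink_speed_up(priv->phylink);

    rtnl_unlock();
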
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 72177fea1cfb..edc3165f0077 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9064,6 +9064,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..5bb0cbc87d06 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* Registers an MDIO node and an AQR105 PHY at address 1:
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
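
Once device_add_software_node() has attached the group to the MDIO bus device, the PHY child node and its properties are visible through the generic fwnode API. A hedged read-back sketch (the helper is hypothetical; the node and property names mirror the group registered above):

#include <linux/phy.h>
#include <linux/property.h>

static void tn40_dump_phy_swnode(struct mii_bus *bus)
{
	struct fwnode_handle *phy_node;
	const char *fw_name;

	phy_node = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
					       "ethernet-phy@1");
	if (!phy_node)
		return;

	if (!fwnode_property_read_string(phy_node, "firmware-name", &fw_name))
		dev_info(&bus->dev, "PHY firmware: %s\n", fw_name);

	fwnode_handle_put(phy_node);
}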
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index afe8127fd32b..114110740716 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -515,7 +515,7 @@ static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
napi_disable(&flow->napi_rx);
hrtimer_cancel(&flow->rx_hrtimer);
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!id);
+ am65_cpsw_nuss_rx_cleanup);
for (port = 0; port < common->port_num; port++) {
if (!common->ports[port].ndev)
@@ -2775,7 +2775,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
eth_random_addr(port->slave.mac_addr);
- dev_err(dev, "Use random MAC address\n");
+ dev_info(dev, "Use random MAC address\n");
}
}
@@ -3433,7 +3433,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index cec0a90659d9..66713bc93174 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1418,6 +1418,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..62065416e886 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -955,7 +955,7 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
for (i = 0; i < num_flows; i++)
k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
+ prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index 89dc4c401a8d..e4d993f31374 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -33,7 +34,7 @@
#define CMD_CTR (0x2 << CMD_SHIFT)
#define CMD_MASK GENMASK(15, CMD_SHIFT)
-#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
+#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
#define DET_CMD_LEN 4
#define DET_SOF_LEN 2
@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
}
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
- unsigned int frame_len)
+ unsigned int frame_len, bool drop)
{
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
__func__, ret);
mse->stats.xfer_err++;
+ } else if (drop) {
+ netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
+ ret = -EINVAL;
} else if (*sof != cpu_to_be16(DET_SOF)) {
netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
__func__, *sof);
@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
struct sk_buff *skb;
unsigned int rxalign;
unsigned int rxlen;
+ bool drop = false;
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, cmd_resp);
mse->stats.invalid_rts++;
- return;
+ drop = true;
+ goto drop;
}
net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
}
rxlen = cmd_resp & LEN_MASK;
- if (!rxlen) {
- net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+ if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
+ net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+ rxlen);
mse->stats.invalid_len++;
- return;
+ drop = true;
}
+ /* In case of an invalid CMD_RTS, the frame must be consumed anyway.
+ * So assume the maximum possible frame length.
+ */
+drop:
+ if (drop)
+ rxlen = VLAN_ETH_FRAME_LEN;
+
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
* They are copied, but ignored.
*/
rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
- if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+ if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
static int mse102x_net_open(struct net_device *ndev)
{
struct mse102x_net *mse = netdev_priv(ndev);
+ struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev)
netif_carrier_on(ndev);
+ /* The SPI interrupt can get stuck in case of pending packet(s).
+ * So poll for possible packet(s) to re-arm the interrupt.
+ */
+ mutex_lock(&mses->lock);
+ mse102x_rx_pkt_spi(mse);
+ mutex_unlock(&mses->lock);
+
netif_dbg(mse, ifup, ndev, "network device up\n");
return 0;
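
The new bounds check rejects anything that cannot be a valid Ethernet frame while still consuming the SPI transfer so the chip's FIFO advances. Restated as a hedged helper (the function is hypothetical; the constants are the kernel's: ETH_ZLEN is 60 bytes, VLAN_ETH_FRAME_LEN is 1518):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hypothetical restatement of the accept window used above. */
static bool mse102x_len_valid(unsigned int rxlen)
{
	return rxlen >= ETH_ZLEN && rxlen <= VLAN_ETH_FRAME_LEN;
}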
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 234db693cefa..3065f25777bb 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,6 @@ struct hv_netvsc_packet {
u8 cp_partial; /* partial copy into send buffer */
u8 rmsg_size; /* RNDIS header and PPI size */
- u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */
u8 page_buf_cnt;
u16 q_idx;
@@ -893,6 +892,18 @@ struct nvsp_message {
sizeof(struct nvsp_message))
#define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor)
+/* Maximum # of contiguous data ranges that can make up a transmitted packet.
+ * Typically it's the max SKB fragments plus 2 for the rndis packet and the
+ * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may
+ * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries
+ * in a GPA direct packet sent to netvsp over VMBus.
+ */
+#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT
+#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2)
+#else
+#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT
+#endif
+
/* Estimated requestor size:
* out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size
*/
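
Both macros are compile-time constants, so the bound argued in the comment above can be pinned down with an assertion. With the default MAX_SKB_FRAGS of 17 the first branch is taken and MAX_DATA_RANGES evaluates to 19, under the 32 entries MAX_PAGE_BUFFER_COUNT allows; a hedged sketch:

#include <linux/build_bug.h>

/* A transmit must never need more data ranges than one GPA direct
 * packet can describe: 17 + 2 = 19 <= 32 with the default config.
 */
static_assert(MAX_DATA_RANGES <= MAX_PAGE_BUFFER_COUNT);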
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d6f5b9ea3109..720104661d7f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -953,8 +953,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+ pend_size;
int i;
u32 padding = 0;
- u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt;
u32 remain;
/* Add padding */
@@ -1055,6 +1054,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
return 0;
}
+/* Build an "array" of mpb entries describing the data to be transferred
+ * over VMBus. After the desc header fields, each "array" entry is variable
+ * size, and each entry starts after the end of the previous entry. The
+ * "offset" and "len" fields for each entry imply the size of the entry.
+ *
+ * The pfns are expressed in HV_HYP_PAGE_SIZE units, since all Hyper-V
+ * communication uses that granularity, even if the guest page size is larger.
+ * Each entry in the input "pb" array must describe a contiguous range of
+ * guest physical memory so that the pfns are sequential if the range crosses
+ * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE.
+ */
+static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb,
+ u32 page_buffer_count,
+ struct vmbus_packet_mpb_array *desc,
+ u32 *desc_size)
+{
+ struct hv_mpb_array *mpb_entry = &desc->range;
+ int i, j;
+
+ for (i = 0; i < page_buffer_count; i++) {
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
+
+ mpb_entry->offset = offset;
+ mpb_entry->len = len;
+
+ for (j = 0; j < HVPFN_UP(offset + len); j++)
+ mpb_entry->pfn_array[j] = pb[i].pfn + j;
+
+ mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j];
+ }
+
+ desc->rangecount = page_buffer_count;
+ *desc_size = (char *)mpb_entry - (char *)desc;
+}
+
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -1097,8 +1132,11 @@ static inline int netvsc_send_pkt(
packet->dma_range = NULL;
if (packet->page_buf_cnt) {
+ struct vmbus_channel_packet_page_buffer desc;
+ u32 desc_size;
+
if (packet->cp_partial)
- pb += packet->rmsg_pgcnt;
+ pb++;
ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
if (ret) {
@@ -1106,11 +1144,12 @@ static inline int netvsc_send_pkt(
goto exit;
}
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pb, packet->page_buf_cnt,
- &nvmsg, sizeof(nvmsg),
- req_id);
-
+ netvsc_build_mpb_array(pb, packet->page_buf_cnt,
+ (struct vmbus_packet_mpb_array *)&desc,
+ &desc_size);
+ ret = vmbus_sendpacket_mpb_desc(out_channel,
+ (struct vmbus_packet_mpb_array *)&desc,
+ desc_size, &nvmsg, sizeof(nvmsg), req_id);
if (ret)
netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
@@ -1259,7 +1298,7 @@ int netvsc_send(struct net_device *ndev,
packet->send_buf_index = section_index;
if (packet->cp_partial) {
- packet->page_buf_cnt -= packet->rmsg_pgcnt;
+ packet->page_buf_cnt--;
packet->total_data_buflen = msd_len + packet->rmsg_size;
} else {
packet->page_buf_cnt = 0;
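
To make the variable-size entry layout concrete, here is a hedged standalone model of how a single pb range expands (plain userspace C with simplified local definitions, not the kernel code): an entry carries its offset/len header plus one pfn per hypervisor page the range touches.

#include <stdint.h>
#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096u
#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE - 1) / HV_HYP_PAGE_SIZE)

int main(void)
{
	/* One range: 0x2000 bytes starting 0x800 into pfn 100. */
	uint32_t offset = 0x800, len = 0x2000;
	uint32_t npfns = HVPFN_UP(offset + len);	/* 0x2800 -> 3 pages */

	/* The entry lists pfns 100, 101 and 102. */
	printf("pfns needed: %u\n", npfns);

	/* Entry size = offset + len fields plus the pfn array; this is
	 * what accumulates into *desc_size above.
	 */
	printf("entry bytes: %zu\n",
	       2 * sizeof(uint32_t) + npfns * sizeof(uint64_t));
	return 0;
}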
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6c4abfc3a28..d9ddbf087845 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -325,43 +325,10 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
return txq;
}
-static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
- struct hv_page_buffer *pb)
-{
- int j = 0;
-
- hvpfn += offset >> HV_HYP_PAGE_SHIFT;
- offset = offset & ~HV_HYP_PAGE_MASK;
-
- while (len > 0) {
- unsigned long bytes;
-
- bytes = HV_HYP_PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
- pb[j].pfn = hvpfn;
- pb[j].offset = offset;
- pb[j].len = bytes;
-
- offset += bytes;
- len -= bytes;
-
- if (offset == HV_HYP_PAGE_SIZE && len) {
- hvpfn++;
- offset = 0;
- j++;
- }
- }
-
- return j + 1;
-}
-
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 slots_used = 0;
- char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
int i;
@@ -370,28 +337,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
* 2. skb linear data
* 3. skb fragment data
*/
- slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
- offset_in_hvpage(hdr),
- len,
- &pb[slots_used]);
+ pb[0].offset = offset_in_hvpage(hdr);
+ pb[0].len = len;
+ pb[0].pfn = virt_to_hvpfn(hdr);
packet->rmsg_size = len;
- packet->rmsg_pgcnt = slots_used;
- slots_used += fill_pg_buf(virt_to_hvpfn(data),
- offset_in_hvpage(data),
- skb_headlen(skb),
- &pb[slots_used]);
+ pb[1].offset = offset_in_hvpage(skb->data);
+ pb[1].len = skb_headlen(skb);
+ pb[1].pfn = virt_to_hvpfn(skb->data);
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+ struct hv_page_buffer *cur_pb = &pb[i + 2];
+ u64 pfn = page_to_hvpfn(skb_frag_page(frag));
+ u32 offset = skb_frag_off(frag);
- slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
- skb_frag_off(frag),
- skb_frag_size(frag),
- &pb[slots_used]);
+ cur_pb->offset = offset_in_hvpage(offset);
+ cur_pb->len = skb_frag_size(frag);
+ cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT);
}
- return slots_used;
+ return frags + 2;
}
static int count_skb_frag_slots(struct sk_buff *skb)
@@ -482,7 +448,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
struct net_device *vf_netdev;
u32 rndis_msg_size;
u32 hash;
- struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+ struct hv_page_buffer pb[MAX_DATA_RANGES];
/* If VF is present and up then redirect packets to it.
* Skip the VF if it is marked down or has no carrier.
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c0ceeef4fcd8..9b8769a8b77a 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
struct hv_netvsc_packet *packet;
- struct hv_page_buffer page_buf[2];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb;
int ret;
/* Setup the packet to send it */
@@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->total_data_buflen = req->request_msg.msg_len;
packet->page_buf_cnt = 1;
- pb[0].pfn = virt_to_phys(&req->request_msg) >>
- HV_HYP_PAGE_SHIFT;
- pb[0].len = req->request_msg.msg_len;
- pb[0].offset = offset_in_hvpage(&req->request_msg);
-
- /* Add one page_buf when request_msg crossing page boundary */
- if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
- packet->page_buf_cnt++;
- pb[0].len = HV_HYP_PAGE_SIZE -
- pb[0].offset;
- pb[1].pfn = virt_to_phys((void *)&req->request_msg
- + pb[0].len) >> HV_HYP_PAGE_SHIFT;
- pb[1].offset = 0;
- pb[1].len = req->request_msg.msg_len -
- pb[0].len;
- }
+ pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT;
+ pb.len = req->request_msg.msg_len;
+ pb.offset = offset_in_hvpage(&req->request_msg);
trace_rndis_send(dev->ndev, 0, &req->request_msg);
rcu_read_lock_bh();
- ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
+ ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false);
rcu_read_unlock_bh();
return ret;
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 753215ebc67c..a036910f6082 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1446,8 +1446,7 @@ static u8 mcps_data_request(
command.pdata.data_req.src_addr_mode = src_addr_mode;
command.pdata.data_req.dst.mode = dst_address_mode;
if (dst_address_mode != MAC_MODE_NO_ADDR) {
- command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
- command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
+ put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
command.pdata.data_req.dst.address[0] = LS_BYTE(
dst_addr->short_address
@@ -1795,12 +1794,12 @@ static int ca8210_skb_rx(
}
hdr.source.mode = data_ind[0];
dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
- hdr.source.pan_id = *(u16 *)&data_ind[1];
+ hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
hdr.dest.mode = data_ind[11];
dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
- hdr.dest.pan_id = *(u16 *)&data_ind[12];
+ hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
@@ -1927,7 +1926,7 @@ static int ca8210_skb_tx(
status = mcps_data_request(
header.source.mode,
header.dest.mode,
- header.dest.pan_id,
+ le16_to_cpu(header.dest.pan_id),
(union macaddr *)&header.dest.extended_addr,
skb->len - mac_len,
&skb->data[mac_len],
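
The accessor conversions above matter on two axes at once: the PAN ID sits at odd offsets inside the MLME buffer, and the wire format is little-endian regardless of the host. A hedged sketch of the pattern (helper name hypothetical):

#include <linux/types.h>
#include <linux/unaligned.h>

static u16 pan_id_from_buf(const u8 *buf)
{
	/* *(u16 *)buf would be an unaligned load on strict-alignment CPUs
	 * and would come back byte-swapped on big-endian hosts; the
	 * accessor reads the two wire bytes explicitly as little-endian.
	 */
	return get_unaligned_le16(buf);
}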
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index d74d47dd6e04..f782d93f826e 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -537,7 +537,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
rc = __i2c_transfer(midev->adapter, &msg, 1);
/* on tx errors, the flow can no longer be considered valid */
- if (rc)
+ if (rc < 0)
mctp_i2c_invalidate_tx_flow(midev, skb);
break;
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
index 00c66240136b..3dd12a8c8b03 100644
--- a/drivers/net/mdio/mdio-mux-meson-gxl.c
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -17,6 +17,7 @@
#define REG2_LEDACT GENMASK(23, 22)
#define REG2_LEDLINK GENMASK(25, 24)
#define REG2_DIV4SEL BIT(27)
+#define REG2_REVERSED BIT(28)
#define REG2_ADCBYPASS BIT(30)
#define REG2_CLKINSEL BIT(31)
#define ETH_REG3 0x4
@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
* The only constraint is that it must match the one in
* drivers/net/phy/meson-gxl.c to properly match the PHY.
*/
- writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
priv->regs + ETH_REG2);
/* Enable the internal phy */
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 42f247cbdcee..a41dc79e9c2e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -87,7 +87,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
- napi_schedule(&rq->napi);
+ if (!hrtimer_active(&rq->napi_timer))
+ hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
@@ -426,6 +427,22 @@ err_pp_destroy:
return err;
}
+static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer)
+{
+ struct nsim_rq *rq;
+
+ rq = container_of(timer, struct nsim_rq, napi_timer);
+ napi_schedule(&rq->napi);
+
+ return HRTIMER_NORESTART;
+}
+
+static void nsim_rq_timer_init(struct nsim_rq *rq)
+{
+ hrtimer_init(&rq->napi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->napi_timer.function = nsim_napi_schedule;
+}
+
static void nsim_enable_napi(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
@@ -615,11 +632,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
return NULL;
skb_queue_head_init(&rq->skb_queue);
+ nsim_rq_timer_init(rq);
return rq;
}
static void nsim_queue_free(struct nsim_rq *rq)
{
+ hrtimer_cancel(&rq->napi_timer);
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
@@ -645,8 +664,11 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
if (ns->rq_reset_mode > 3)
return -EINVAL;
- if (ns->rq_reset_mode == 1)
+ if (ns->rq_reset_mode == 1) {
+ if (!netif_running(ns->netdev))
+ return -ENETDOWN;
return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+ }
qmem->rq = nsim_queue_alloc();
if (!qmem->rq)
@@ -754,11 +776,6 @@ nsim_qreset_write(struct file *file, const char __user *data,
return -EINVAL;
rtnl_lock();
- if (!netif_running(ns->netdev)) {
- ret = -ENETDOWN;
- goto exit_unlock;
- }
-
if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL;
goto exit_unlock;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 96d54c08043d..e757f85ed861 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -97,6 +97,7 @@ struct nsim_rq {
struct napi_struct napi;
struct sk_buff_head skb_queue;
struct page_pool *page_pool;
+ struct hrtimer napi_timer;
};
struct netdevsim {
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6599feca1967..e32013eb0186 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -31,6 +31,7 @@
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
@@ -123,6 +124,9 @@
#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+/* LDCTRL bits */
+#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4)
+
/* IOCTRL2 bits */
#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
@@ -197,6 +201,7 @@ struct dp83822_private {
bool set_gpio2_clk_out;
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
+ int tx_amplitude_100base_tx_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -522,6 +527,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
dp83822->gpio2_clk_out));
+ if (dp83822->tx_amplitude_100base_tx_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL,
+ DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ dp83822->tx_amplitude_100base_tx_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -719,7 +730,12 @@ static int dp83822_phy_reset(struct phy_device *phydev)
return phydev->drv->config_init(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static const u32 tx_amplitude_100base_tx_gain[] = {
+ 80, 82, 83, 85, 87, 88, 90, 92,
+ 93, 95, 97, 98, 100, 102, 103, 105,
+};
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -780,6 +796,8 @@ static int dp83822_of_init(struct phy_device *phydev)
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
const char *of_val;
+ int i, ret;
+ u32 val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -815,6 +833,25 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->set_gpio2_clk_out = true;
}
+ ret = phy_get_tx_amplitude_gain(phydev, dev,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) {
+ if (tx_amplitude_100base_tx_gain[i] == val) {
+ dp83822->tx_amplitude_100base_tx_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->tx_amplitude_100base_tx_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -893,6 +930,7 @@ static int dp8382x_probe(struct phy_device *phydev)
if (!dp83822)
return -ENOMEM;
+ dp83822->tx_amplitude_100base_tx_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9c0b1c229af6..4033e948f953 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2002,12 +2002,6 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
- /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
- * in this switch shall be regarded as broken.
- */
- if (phydev->dev_flags & MICREL_NO_EEE)
- phy_disable_eee(phydev);
-
return kszphy_config_init(phydev);
}
@@ -5680,7 +5674,6 @@ static struct phy_driver ksphy_driver[] = {
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = ksz9477_resume,
- .get_features = ksz9477_get_features,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0e17cc458efd..93de88c1c8fd 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -37,47 +37,6 @@ static int lan88xx_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
}
-static int lan88xx_phy_config_intr(struct phy_device *phydev)
-{
- int rc;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
- rc = phy_read(phydev, LAN88XX_INT_STS);
- rc = phy_write(phydev, LAN88XX_INT_MASK,
- LAN88XX_INT_MASK_MDINTPIN_EN_ |
- LAN88XX_INT_MASK_LINK_CHANGE_);
- } else {
- rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
- if (rc)
- return rc;
-
- /* Ack interrupts after they have been disabled */
- rc = phy_read(phydev, LAN88XX_INT_STS);
- }
-
- return rc < 0 ? rc : 0;
-}
-
-static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status;
-
- irq_status = phy_read(phydev, LAN88XX_INT_STS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
@@ -528,8 +487,9 @@ static struct phy_driver microchip_phy_driver[] = {
.config_aneg = lan88xx_config_aneg,
.link_change_notify = lan88xx_link_change_notify,
- .config_intr = lan88xx_phy_config_intr,
- .handle_interrupt = lan88xx_handle_interrupt,
+ /* Interrupt handling is broken; leave the interrupt callbacks
+ * undefined so the PHY core falls back to polling.
+ */
.suspend = lan88xx_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index e9fc54517449..16e1c13ae2f8 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2025 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -19,6 +19,8 @@
#include "nxp-c45-tja11xx.h"
+#define PHY_ID_MASK GENMASK(31, 4)
+/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
@@ -1956,6 +1958,30 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
}
}
+static int nxp_c45_macsec_ability(struct phy_device *phydev)
+{
+ bool macsec_ability;
+ int phy_abilities;
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+
+ return macsec_ability;
+}
+
+static int tja1103_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1104_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
static const struct nxp_c45_regmap tja1120_regmap = {
.vend1_ptp_clk_period = 0x1020,
.vend1_event_msg_filt = 0x9010,
@@ -2026,7 +2052,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = {
{
- PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1103",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -2048,6 +2073,31 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja1103_match_phy_device,
+ },
+ {
+ .name = "NXP C45 TJA1104",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1103_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1103_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja1104_match_phy_device,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 92161af788af..2a01887c5617 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3123,19 +3123,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
EXPORT_SYMBOL(phy_get_pause);
#if IS_ENABLED(CONFIG_OF_MDIO)
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
- s32 int_delay;
- int ret;
-
- ret = device_property_read_u32(dev, name, &int_delay);
- if (ret)
- return ret;
-
- return int_delay;
+ return device_property_read_u32(dev, name, val);
}
#else
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
return -EINVAL;
}
@@ -3160,12 +3153,12 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
const int *delay_values, int size, bool is_rx)
{
- s32 delay;
- int i;
+ int i, ret;
+ u32 delay;
if (is_rx) {
- delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
return 1;
@@ -3174,8 +3167,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
} else {
- delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
return 1;
@@ -3184,8 +3177,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
}
- if (delay < 0)
- return delay;
+ if (ret < 0)
+ return ret;
if (size == 0)
return delay;
@@ -3220,6 +3213,30 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
+/**
+ * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the device's device struct
+ * @linkmode: linkmode for which the tx amplitude gain should be retrieved
+ * @val: tx amplitude gain
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val)
+{
+ switch (linkmode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ return phy_get_u32_property(dev,
+ "tx-amplitude-100base-tx-percent",
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index f550576eb9da..6f9d8da76c4d 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy)
if (!phy->phy_num_led_triggers)
return 0;
- phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
- sizeof(*phy->led_link_trigger),
- GFP_KERNEL);
+ phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger),
+ GFP_KERNEL);
if (!phy->led_link_trigger) {
err = -ENOMEM;
goto out_clear;
@@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy)
if (err)
goto out_free_link;
- phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev,
- phy->phy_num_led_triggers,
- sizeof(struct phy_led_trigger),
- GFP_KERNEL);
+ phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers,
+ sizeof(struct phy_led_trigger),
+ GFP_KERNEL);
if (!phy->phy_led_triggers) {
err = -ENOMEM;
goto out_unreg_link;
@@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy)
out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
- devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+ kfree(phy->phy_led_triggers);
out_unreg_link:
phy_led_trigger_unregister(phy->led_link_trigger);
out_free_link:
- devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
phy->led_link_trigger = NULL;
out_clear:
phy->phy_num_led_triggers = 0;
@@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+ kfree(phy->phy_led_triggers);
+ phy->phy_led_triggers = NULL;
- if (phy->led_link_trigger)
+ if (phy->led_link_trigger) {
phy_led_trigger_unregister(phy->led_link_trigger);
+ kfree(phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
+ }
}
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b00a315de060..8c4dfe9dc765 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -82,12 +82,15 @@ struct phylink {
unsigned int pcs_state;
bool link_failed;
+ bool suspend_link_up;
+ bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
bool phy_enable_tx_lpi;
bool mac_enable_tx_lpi;
bool mac_tx_clk_stop;
u32 mac_tx_lpi_timer;
+ u8 mac_rx_clk_stop_blocked;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -1360,12 +1363,16 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(state->interface));
+ pl->major_config_failed = false;
+
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
"mac_select_pcs unexpectedly failed: %pe\n",
pcs);
+
+ pl->major_config_failed = true;
return;
}
@@ -1387,6 +1394,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
ERR_PTR(err));
+ pl->major_config_failed = true;
return;
}
}
@@ -1410,8 +1418,15 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_mac_config(pl, state);
- if (pl->pcs)
- phylink_pcs_post_config(pl->pcs, state->interface);
+ if (pl->pcs) {
+ err = phylink_pcs_post_config(pl->pcs, state->interface);
+ if (err < 0) {
+ phylink_err(pl, "pcs_post_config failed: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
@@ -1422,11 +1437,12 @@ static void phylink_major_config(struct phylink *pl, bool restart,
err = phylink_pcs_config(pl->pcs, neg_mode, state,
!!(pl->link_config.pause & MLO_PAUSE_AN));
- if (err < 0)
- phylink_err(pl, "pcs_config failed: %pe\n",
- ERR_PTR(err));
- else if (err > 0)
+ if (err < 0) {
+ phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err));
+ pl->major_config_failed = true;
+ } else if (err > 0) {
restart = true;
+ }
if (restart)
phylink_pcs_an_restart(pl);
@@ -1434,16 +1450,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->mac_ops->mac_finish) {
err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->phydev && pl->phy_ib_mode) {
err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "phy_config_inband: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->sfp_bus) {
@@ -1795,6 +1817,12 @@ static void phylink_resolve(struct work_struct *w)
}
}
+ /* If configuration of the interface failed, force the link down
+ * until we get a successful configuration.
+ */
+ if (pl->major_config_failed)
+ link_state.link = false;
+
if (link_state.link != cur_link_state) {
pl->old_link_state = link_state.link;
if (!link_state.link)
@@ -2046,7 +2074,7 @@ bool phylink_expects_phy(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_config.interface)))
+ phy_interface_mode_is_8023z(pl->link_interface)))
return false;
return true;
}
@@ -2595,6 +2623,64 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disable the PHY's ability to stop the receive clock while the receive path
+ * is in EEE LPI state, until the number of calls to phylink_rx_clk_stop_block()
+ * is balanced by calls to phylink_rx_clk_stop_unblock().
+ */
+void phylink_rx_clk_stop_block(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == U8_MAX) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Disable PHY receive clock stop if this is the first time this
+ * function has been called and clock-stop was previously enabled.
+ */
+ if (pl->mac_rx_clk_stop_blocked++ == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, false);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block);
+
+/**
+ * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * All calls to phylink_rx_clk_stop_block() must be balanced with a
+ * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's
+ * ability to stop the receive clock when the receive path is in EEE LPI mode.
+ */
+void phylink_rx_clk_stop_unblock(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == 0) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Re-enable PHY receive clock stop if the number of unblocks matches
+ * the number of calls to the block function above.
+ */
+ if (--pl->mac_rx_clk_stop_blocked == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, true);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock);
+
+/**
* phylink_suspend() - handle a network device suspend event
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
@@ -2619,14 +2705,16 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
/* Stop the resolver bringing the link up */
__set_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state);
- /* Disable the carrier, to prevent transmit timeouts,
- * but one would hope all packets have been sent. This
- * also means phylink_resolve() will do nothing.
- */
- if (pl->netdev)
- netif_carrier_off(pl->netdev);
- else
+ pl->suspend_link_up = phylink_link_is_up(pl);
+ if (pl->suspend_link_up) {
+ /* Disable the carrier, to prevent transmit timeouts,
+ * but one would hope all packets have been sent. This
+ * also means phylink_resolve() will do nothing.
+ */
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
pl->old_link_state = false;
+ }
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -2639,6 +2727,31 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
EXPORT_SYMBOL_GPL(phylink_suspend);
/**
+ * phylink_prepare_resume() - prepare to resume a network device
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Optional, but if used it must be called prior to phylink_resume().
+ *
+ * Prepare a network device for resume, resuming the PHY as necessary.
+ */
+void phylink_prepare_resume(struct phylink *pl)
+{
+ struct phy_device *phydev = pl->phydev;
+
+ ASSERT_RTNL();
+
+ /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock
+ * when PDOWN is set. However, some MACs require RXC to be running
+ * in order to resume. If the MAC requires RXC, and we have a PHY,
+ * then resume the PHY. Note that 802.3 allows PHYs 500ms before
+ * the clock meets requirements. We do not implement this delay.
+ */
+ if (pl->config->mac_requires_rxc && phydev && phydev->suspended)
+ phy_resume(phydev);
+}
+EXPORT_SYMBOL_GPL(phylink_prepare_resume);
+
+/**
* phylink_resume() - handle a network device resume event
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -2652,15 +2765,18 @@ void phylink_resume(struct phylink *pl)
if (test_bit(PHYLINK_DISABLE_MAC_WOL, &pl->phylink_disable_state)) {
/* Wake-on-Lan enabled, MAC handling */
- /* Call mac_link_down() so we keep the overall state balanced.
- * Do this under the state_mutex lock for consistency. This
- * will cause a "Link Down" message to be printed during
- * resume, which is harmless - the true link state will be
- * printed when we run a resolve.
- */
- mutex_lock(&pl->state_mutex);
- phylink_link_down(pl);
- mutex_unlock(&pl->state_mutex);
+ if (pl->suspend_link_up) {
+ /* Call mac_link_down() so we keep the overall state
+ * balanced. Do this under the state_mutex lock for
+ * consistency. This will cause a "Link Down" message
+ * to be printed during resume, which is harmless -
+ * the true link state will be printed when we run a
+ * resolve.
+ */
+ mutex_lock(&pl->state_mutex);
+ phylink_link_down(pl);
+ mutex_unlock(&pl->state_mutex);
+ }
/* Re-apply the link parameters so that all the settings get
* restored to the MAC.
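
A hedged usage sketch of the new clk-stop guard pair, mirroring how the stmmac hunk earlier in this series wraps its filter writes; foo_priv and foo_write_mac_filters() are hypothetical, and the caller must hold RTNL:

#include <linux/phylink.h>

struct foo_priv {
	struct phylink *phylink;	/* hypothetical */
};

static void foo_write_mac_filters(struct foo_priv *priv);	/* hypothetical */

static void foo_set_rx_mode(struct foo_priv *priv)
{
	/* Keep the PHY's receive clock running while writing MAC
	 * registers that are clocked from it; calls must balance.
	 */
	phylink_rx_clk_stop_block(priv->phylink);
	foo_write_mac_filters(priv);
	phylink_rx_clk_stop_unblock(priv->phylink);
}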
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 96fa3857d8e2..2cab046749a9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10085,6 +10085,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
+ { USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index bb0bf1415872..7b3739b29c8f 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = {
.tx_fixup = rndis_tx_fixup,
};
-static const struct driver_info wwan_rndis_info = {
- .description = "Mobile Broadband RNDIS device",
- .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
- .bind = rndis_bind,
- .unbind = rndis_unbind,
- .status = rndis_status,
- .rx_fixup = rndis_rx_fixup,
- .tx_fixup = rndis_tx_fixup,
-};
-
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -676,11 +666,9 @@ static const struct usb_device_id products [] = {
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
}, {
- /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
- * Telit FN990A (RNDIS)
- */
+ /* Novatel Verizon USB730L */
USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
- .driver_info = (unsigned long)&wwan_rndis_info,
+ .driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d1ed544ba03a..8879af5292b4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2789,7 +2789,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2802,10 +2803,16 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
+{
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+
if (!napi->weight)
return;
@@ -2817,15 +2824,24 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
+ struct napi_struct *napi = &sq->napi;
+
if (napi->weight)
napi_disable(napi);
}
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct napi_struct *napi = &rq->napi;
+
+ napi_disable(napi);
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi =
@@ -2836,9 +2852,9 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -3035,8 +3051,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3055,8 +3071,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -3302,25 +3318,75 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+static void __virtnet_rx_pause(struct virtnet_info *vi,
+ struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
-static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+static void virtnet_rx_pause_all(struct virtnet_info *vi)
+{
+ int i;
+
+ /*
+ * Make sure refill_work does not run concurrently, to avoid a
+ * napi_disable race that would lead to a deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ __virtnet_rx_pause(vi, &vi->rq[i]);
+}
+
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ /*
+ * Make sure refill_work does not run concurrently, to avoid a
+ * napi_disable race that would lead to a deadlock.
+ */
+ disable_delayed_refill(vi);
+ cancel_delayed_work_sync(&vi->refill);
+ __virtnet_rx_pause(vi, rq);
+}
+
+static void __virtnet_rx_resume(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ bool refill)
{
bool running = netif_running(vi->dev);
+ bool schedule_refill = false;
- if (!try_fill_recv(vi, rq, GFP_KERNEL))
+ if (refill && !try_fill_recv(vi, rq, GFP_KERNEL))
+ schedule_refill = true;
+ if (running)
+ virtnet_napi_enable(rq);
+
+ if (schedule_refill)
schedule_delayed_work(&vi->refill, 0);
+}
- if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+static void virtnet_rx_resume_all(struct virtnet_info *vi)
+{
+ int i;
+
+ enable_delayed_refill(vi);
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (i < vi->curr_queue_pairs)
+ __virtnet_rx_resume(vi, &vi->rq[i], true);
+ else
+ __virtnet_rx_resume(vi, &vi->rq[i], false);
+ }
+}
+
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ enable_delayed_refill(vi);
+ __virtnet_rx_resume(vi, rq, true);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3349,7 +3415,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3383,7 +3449,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -3636,8 +3702,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
succ:
vi->curr_queue_pairs = queue_pairs;
/* virtnet_open() will refill when the device is brought up. */
- if (dev->flags & IFF_UP)
+ spin_lock_bh(&vi->refill_lock);
+ if (dev->flags & IFF_UP && vi->refill_enabled)
schedule_delayed_work(&vi->refill, 0);
+ spin_unlock_bh(&vi->refill_lock);
return 0;
}
@@ -5595,6 +5663,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
tx->hw_drop_ratelimits = 0;
+
+ netdev_stat_queue_sum(dev,
+ dev->real_num_rx_queues, vi->max_queue_pairs, rx,
+ dev->real_num_tx_queues, vi->max_queue_pairs, tx);
}
static const struct netdev_stat_ops virtnet_stat_ops = {
@@ -5802,8 +5874,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
DMA_TO_DEVICE, 0);
- if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
- return -ENOMEM;
+ if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
+ err = -ENOMEM;
+ goto err_free_buffs;
+ }
err = xsk_pool_dma_map(pool, dma_dev, 0);
if (err)
@@ -5831,6 +5905,8 @@ err_rq:
err_xsk_map:
virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
DMA_TO_DEVICE, 0);
+err_free_buffs:
+ kvfree(rq->xsk_buffs);
return err;
}
@@ -5923,12 +5999,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (prog)
bpf_prog_add(prog, vi->max_queue_pairs - 1);
+ virtnet_rx_pause_all(vi);
+
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
if (!prog) {
@@ -5960,14 +6036,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_enabled = false;
}
+ virtnet_rx_resume_all(vi);
for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
- if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ if (netif_running(dev))
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
return 0;
@@ -5979,12 +6053,10 @@ err:
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}
+ virtnet_rx_resume_all(vi);
if (netif_running(dev)) {
- for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3df6aabc7e33..c676979c7ab9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3607,8 +3607,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- WRITE_ONCE(netdev->mtu, new_mtu);
-
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
@@ -3622,6 +3620,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
+ WRITE_ONCE(netdev->mtu, new_mtu);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
@@ -3638,6 +3637,8 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
"Closing it\n", err);
goto out;
}
+ } else {
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
out:
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 616ecc38d172..5f470499e600 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rbi->len, false);
+ rcd->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 92516189e792..cdd2a78badf5 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -227,9 +227,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
be32_to_cpu(fdb->vni)))
goto nla_put_failure;
- ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
+ ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used));
ci.ndm_confirmed = 0;
- ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
+ ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated));
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
@@ -434,8 +434,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
f = __vxlan_find_mac(vxlan, mac, vni);
- if (f && f->used != jiffies)
- f->used = jiffies;
+ if (f && READ_ONCE(f->used) != jiffies)
+ WRITE_ONCE(f->used, jiffies);
return f;
}
@@ -1009,12 +1009,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
!(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
if (f->state != state) {
f->state = state;
- f->updated = jiffies;
+ WRITE_ONCE(f->updated, jiffies);
notify = 1;
}
if (f->flags != fdb_flags) {
f->flags = fdb_flags;
- f->updated = jiffies;
+ WRITE_ONCE(f->updated, jiffies);
notify = 1;
}
}
@@ -1048,7 +1048,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan,
}
if (ndm_flags & NTF_USE)
- f->used = jiffies;
+ WRITE_ONCE(f->used, jiffies);
if (notify) {
if (rd == NULL)
@@ -1481,7 +1481,7 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
src_mac, &rdst->remote_ip.sa, &src_ip->sa);
rdst->remote_ip = *src_ip;
- f->updated = jiffies;
+ WRITE_ONCE(f->updated, jiffies);
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
@@ -2852,7 +2852,7 @@ static void vxlan_cleanup(struct timer_list *t)
if (f->flags & NTF_EXT_LEARNED)
continue;
- timeout = f->used + vxlan->cfg.age_interval * HZ;
+ timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ;
if (time_before_eq(timeout, jiffies)) {
netdev_dbg(vxlan->dev,
"garbage collect %pM\n",
@@ -4415,6 +4415,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ bool rem_ip_changed, change_igmp;
struct net_device *lowerdev;
struct vxlan_config conf;
struct vxlan_rdst *dst;
@@ -4438,8 +4439,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip);
+ change_igmp = vxlan->dev->flags & IFF_UP &&
+ (rem_ip_changed ||
+ dst->remote_ifindex != conf.remote_ifindex);
+
/* handle default dst entry */
- if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
+ if (rem_ip_changed) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
@@ -4483,6 +4489,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
+ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_leave(vxlan);
+
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
@@ -4490,7 +4499,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (lowerdev && lowerdev != dst->remote_dev)
dst->remote_dev = lowerdev;
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
- return 0;
+
+ if (!err && change_igmp &&
+ vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_join(vxlan);
+
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
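
`vxlan_changelink()` now computes up front whether the default remote or the lower ifindex changed while the interface is up, leaves the old multicast group before the new configuration is applied, joins the new group afterwards, and propagates any join error. The order of operations, condensed (hypothetical helpers):

```c
#include <stdbool.h>

struct cfg { unsigned int remote_ip; int ifindex; };

/* assumed helpers */
bool addr_multicast(unsigned int ip);
int mcast_leave(const struct cfg *c);
int mcast_join(const struct cfg *c);
void apply_config(struct cfg *cur, const struct cfg *next);

static int change_link(struct cfg *cur, const struct cfg *next, bool up)
{
	bool ip_changed = cur->remote_ip != next->remote_ip;
	bool regroup = up && (ip_changed || cur->ifindex != next->ifindex);
	int err = 0;

	if (regroup && addr_multicast(cur->remote_ip))
		err = mcast_leave(cur);		/* leave the old group */

	apply_config(cur, next);

	if (!err && regroup && addr_multicast(cur->remote_ip))
		err = mcast_join(cur);		/* join the new one */

	return err;
}
```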
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 6e6e9f05509a..06d19e90eadb 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
* default dst remote_ip previously added for this vni
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
- !vxlan_addr_any(&dst->remote_ip))
+ !vxlan_addr_any(&dst->remote_ip)) {
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
+ vninode->vni);
+
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+ }
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&vninode->remote_ip) &&
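
The vnifilter fix wraps the `__vxlan_fdb_delete()` call in the per-bucket `hash_lock`, honoring the double-underscore convention that the caller already holds the lock. The caller-locks contract, sketched in user space (hypothetical names):

```c
#include <pthread.h>

#define NBUCKETS 256

struct table {
	pthread_mutex_t lock[NBUCKETS];
};

/* assumed helpers; the __ prefix marks "bucket lock held by caller" */
unsigned int hash_key(const unsigned char *mac, unsigned int vni);
void __tbl_delete_locked(struct table *t, const unsigned char *mac,
			 unsigned int vni);

static void tbl_delete(struct table *t, const unsigned char *mac,
		       unsigned int vni)
{
	unsigned int b = hash_key(mac, vni) % NBUCKETS;

	pthread_mutex_lock(&t->lock[b]);	/* honor the contract */
	__tbl_delete_locked(t, mac, vni);
	pthread_mutex_unlock(&t->lock[b]);
}
```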
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index f777314db8b3..7a55afd33be8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
struct dp_rx_tid {
u8 tid;
- u32 *vaddr;
dma_addr_t paddr;
u32 size;
u32 ba_win_sz;
@@ -37,6 +36,9 @@ struct dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
struct ath11k_base *ab;
+ u32 *vaddr_unaligned;
+ dma_addr_t paddr_unaligned;
+ u32 unaligned_size;
};
#define DP_REO_DESC_FREE_THRESHOLD 64
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index b8b3dce9cdb5..a7a484a9ba7f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd);
}
@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd_cache);
}
@@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- if (rx_tid->vaddr) {
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
rx_tid->paddr = 0;
+ rx_tid->paddr_unaligned = 0;
rx_tid->size = 0;
+ rx_tid->unaligned_size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
if (!rx_tid->active)
goto unlock_exit;
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- u32 *addr_aligned;
- void *vaddr;
+ u32 hw_desc_sz, *vaddr;
+ void *vaddr_unaligned;
dma_addr_t paddr;
int ret;
@@ -1050,37 +1050,34 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
+ rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+ vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+ if (!vaddr_unaligned) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
- if (ret) {
- spin_unlock_bh(&ab->base_lock);
- ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
- peer_mac, tid, ret);
- goto err_mem_free;
- }
-
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
+ rx_tid->vaddr_unaligned = vaddr_unaligned;
+ vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+ rx_tid->paddr_unaligned = paddr;
+ rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+ (unsigned long)rx_tid->vaddr_unaligned);
+ ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
+	/* After dma_alloc_noncoherent(), vaddr is modified for the REO qdesc
+	 * setup. Since these CPU writes are not yet visible to the device,
+	 * the driver must explicitly call dma_sync_single_for_device().
+	 */
+ dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+ rx_tid->size,
+ DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
- ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+ tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
@@ -1088,12 +1085,6 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
}
return ret;
-
-err_mem_free:
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
-
- return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 212cd935e60a..d0aed4c56050 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
- bool bus_type_mode)
+ bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
@@ -204,7 +204,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ with_default ?
+ ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
+ variant);
break;
}
@@ -216,19 +218,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@@ -887,10 +889,41 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ath12k_mac_destroy(ag);
}
+u8 ath12k_get_num_partner_link(struct ath12k *ar)
+{
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_pdev *pdev;
+ u8 num_link = 0;
+ int i, j;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ num_link++;
+ }
+ }
+
+ return num_link;
+}
+
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
+ u8 num_link = ath12k_get_num_partner_link(ar);
int ret;
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_ready(ar);
if (ret) {
ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
@@ -932,7 +965,7 @@ static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
int ret, i;
- if (!ag->mlo_capable || ag->num_devices == 1)
+ if (!ag->mlo_capable)
return 0;
ret = ath12k_mac_mlo_setup(ag);
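
`ath12k_get_num_partner_link()` walks every radio of every device in the hardware group and counts all pdevs except the caller's own; MLO ready/setup/teardown are then skipped when the count is zero, instead of keying off `num_devices == 1`. The traversal, reduced to its shape:

```c
struct pdev { const void *owner; };
struct device { int nradios; struct pdev pdevs[4]; };
struct group { int ndev; struct device *dev[4]; };

/* Count every radio in the group except the caller itself. */
static int partner_links(const struct group *g, const void *self)
{
	int i, j, n = 0;

	for (i = 0; i < g->ndev; i++)
		for (j = 0; j < g->dev[i]->nradios; j++)
			if (g->dev[i]->pdevs[j].owner != self)
				n++;

	return n;
}
```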
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index ee595794a7ae..96b2830e891b 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -87,6 +87,7 @@ enum wme_ac {
#define ATH12K_HT_MCS_MAX 7
#define ATH12K_VHT_MCS_MAX 9
#define ATH12K_HE_MCS_MAX 11
+#define ATH12K_EHT_MCS_MAX 15
enum ath12k_crypt_mode {
/* Only use hardware crypto engine */
@@ -166,6 +167,7 @@ struct ath12k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
+ bool napi_enabled;
struct napi_struct napi;
struct net_device *napi_ndev;
};
@@ -500,6 +502,7 @@ struct ath12k_link_sta {
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+ u32 peer_nss;
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -1084,6 +1087,7 @@ int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
+u8 ath12k_get_num_partner_link(struct ath12k *ar);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index b952e79179d0..8737dc8fea35 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -638,6 +638,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
ppdu_info->num_mpdu_fcs_err =
u32_get_bits(info[0],
HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ ppdu_info->peer_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
@@ -655,6 +658,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats[userid];
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1 ||
+ ppdu_info->num_mpdu_fcs_err > 1)
+ ppdu_info->userstats[userid].ampdu_present = true;
+
ppdu_info->num_users += 1;
ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
@@ -755,8 +763,8 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
info[0] = __le32_to_cpu(mpdu_start->info0);
ppdu_info->userid = userid;
- ppdu_info->ampdu_id[userid] =
- u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+ ppdu_info->userstats[userid].ampdu_id =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
break;
@@ -956,15 +964,14 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
- u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
rxs->nss = ppduinfo->nss + 1;
- if (ampdu_id) {
+ if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
- rxs->ampdu_reference = ampdu_id;
+ rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
}
if (ppduinfo->he_mu_flags) {
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index ae6608b10bb5..ff6a709b5042 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -2392,6 +2392,23 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+				    "Received invalid MCS %d in EHT mode\n",
+ rate_mcs);
+ break;
+ }
+
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->nss = nss;
+ rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ default:
+ break;
}
}
@@ -2486,7 +2503,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2497,6 +2514,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->encoding == RX_ENC_EHT) ? "eht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 1ce82088c954..c0aa965f47e7 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -79,6 +79,9 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
case RX_MSDU_START_SGI_3_2_US:
ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
}
return ret;
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 1fffabaca527..7dc357628785 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -8,6 +8,8 @@
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
+#include "peer.h"
+#include "mac.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
@@ -117,7 +119,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
@@ -560,13 +562,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
@@ -582,6 +584,124 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
}
}
+static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct rate_info txrate = {0};
+ u16 rate, ru_tones;
+ u8 rate_idx = 0;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+ sta = peer->sta;
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+	/* Prefer the real NSS value arsta->last_txrate.nss; if it is
+	 * invalid, fall back to the NSS value negotiated at assoc.
+	 */
+ if (arsta->last_txrate.nss)
+ txrate.nss = arsta->last_txrate.nss;
+ else
+ txrate.nss = arsta->peer_nss;
+ spin_unlock_bh(&ab->base_lock);
+
+ switch (ts->pkt_type) {
+ case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+ case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+ ts->pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0) {
+ ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+ return;
+ }
+
+ txrate.legacy = rate;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+ if (ts->mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ if (txrate.nss != 0)
+ txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+ txrate.flags = RATE_INFO_FLAGS_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+ if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+ if (ts->mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+ if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+ break;
+ default:
+ ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ return;
+ }
+
+ txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ txrate.bw = RATE_INFO_BW_HE_RU;
+ ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+ txrate.he_ru_alloc =
+ ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+ txrate.bw = RATE_INFO_BW_EHT_RU;
+ txrate.eht_ru_alloc =
+ ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ arsta->txrate = txrate;
+ spin_unlock_bh(&ab->base_lock);
+}
+
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
@@ -660,6 +780,8 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
+ ath12k_dp_tx_update_txcompl(ar, ts);
+
ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
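
The new `ath12k_dp_tx_update_txcompl()` above validates the MCS parsed from the completion against each PHY generation's ceiling (7 for HT, 9 for VHT, 11 for HE, 15 for EHT) before tagging the `rate_info` with the matching flags. A compressed sketch of the validate-then-tag shape (three generations shown; constants hypothetical):

```c
#include <errno.h>

enum phy { PHY_HT, PHY_VHT, PHY_HE };

struct rate { int mcs; unsigned int flags; };

#define F_HT	0x1
#define F_VHT	0x2
#define F_HE	0x4

/* Per-generation MCS ceilings, mirroring the driver's checks. */
static const int mcs_max[] = { [PHY_HT] = 7, [PHY_VHT] = 9, [PHY_HE] = 11 };
static const unsigned int flags[] = { [PHY_HT] = F_HT, [PHY_VHT] = F_VHT,
				      [PHY_HE] = F_HE };

static int fill_rate(struct rate *r, enum phy p, int mcs)
{
	if (p > PHY_HE || mcs < 0 || mcs > mcs_max[p])
		return -EINVAL;	/* reject out-of-range hardware values */

	r->mcs = mcs;
	r->flags = flags[p];
	return 0;
}
```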
@@ -670,6 +792,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
+ u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@@ -684,10 +808,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
- if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
- ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
- else
- ts->rate_stats = 0;
+
+ ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+ ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+ ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+ ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+ ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+ ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+ ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+ }
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 7b0403d245e5..a102d27e5785 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2968,7 +2968,7 @@ struct hal_mon_buf_ring {
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
-#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
+#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0)
#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index 54f3eaeca8bb..ac3b3f17ec2c 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@@ -146,6 +146,8 @@ struct hal_rx_user_status {
u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
+ bool ampdu_present;
+ u16 ampdu_id;
};
#define HAL_MAX_UL_MU_USERS 37
@@ -230,7 +232,6 @@ struct hal_rx_mon_ppdu_info {
u8 addr4[ETH_ALEN];
struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
u8 userid;
- u16 ampdu_id[HAL_MAX_UL_MU_USERS];
bool first_msdu_in_mpdu;
bool is_ampdu;
u8 medium_prot_type;
@@ -665,6 +666,9 @@ enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
case RU_996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
+ case RU_2X996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
case RU_26:
fallthrough;
default:
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 3cf5973771d7..eb065a79f6c6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
+ * All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -63,7 +64,12 @@ struct hal_tx_status {
u8 try_cnt;
u8 tid;
u16 peer_id;
- u32 rate_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum ath12k_supported_bw bw;
+ u8 mcs;
+ u16 tones;
+ u8 ofdma;
};
#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 9c3e66dbe0c3..5f6d9896ef61 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -337,6 +337,82 @@ static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
return "<unknown>";
}
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones)
+{
+ switch (tones) {
+ case 26:
+ return RU_26;
+ case 52:
+ return RU_52;
+ case 106:
+ return RU_106;
+ case 242:
+ return RU_242;
+ case 484:
+ return RU_484;
+ case 996:
+ return RU_996;
+ case (996 * 2):
+ return RU_2X996;
+ default:
+ return RU_26;
+ }
+}
+
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
+{
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ case RX_MSDU_START_SGI_1_6_US:
+ return NL80211_RATE_INFO_EHT_GI_1_6;
+ case RX_MSDU_START_SGI_3_2_US:
+ return NL80211_RATE_INFO_EHT_GI_3_2;
+ default:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ }
+}
+
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
+{
+ switch (ru_tones) {
+ case 26:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ case 52:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ case (52 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ case 106:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ case (106 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ case 242:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ case 484:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ case (484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ case 996:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ case (996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ case (996 + 484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ case (2 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ case (2 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ case (3 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ case (3 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ case (4 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ default:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ }
+}
+
enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
{
@@ -3116,6 +3192,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_smps(arsta, arg);
ath12k_peer_assoc_h_mlo(arsta, arg);
+ arsta->peer_nss = arg->peer_nss;
/* TODO: amsdu_disable req? */
}
@@ -4534,9 +4611,6 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
struct ath12k_link_sta *arsta,
struct ieee80211_key_conf *key)
{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4553,19 +4627,10 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- link_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!link_conf) {
- ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return -ENOLINK;
- }
-
if (sta)
peer_addr = arsta->addr;
- else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = link_conf->bssid;
else
- peer_addr = link_conf->addr;
+ peer_addr = arvif->bssid;
key->hw_key_idx = key->keyidx;
@@ -10054,6 +10119,8 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.he_gi = arsta->txrate.he_gi;
sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+ sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc;
}
sinfo->txrate.flags = arsta->txrate.flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
@@ -11140,6 +11207,9 @@ static int __ath12k_mac_mlo_setup(struct ath12k *ar)
}
}
+ if (num_link == 0)
+ return 0;
+
mlo.group_id = cpu_to_le32(ag->id);
mlo.partner_link_id = partner_link_id;
mlo.num_partner_links = num_link;
@@ -11169,10 +11239,16 @@ static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
int ret;
+ u8 num_link;
if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
+ num_link = ath12k_get_num_partner_link(ar);
+
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_teardown(ar);
if (ret) {
ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3594729b6397..1acaf3f68292 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -108,5 +108,8 @@ int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data);
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones);
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index ee14b8484548..895b2314d1d5 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -483,8 +483,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
ath12k_pci_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
}
}
@@ -1114,7 +1117,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi);
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+
ath12k_pci_ext_grp_enable(irq_grp);
}
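
`napi_enable()` and `napi_disable()` must stay strictly paired, so the ath12k PCI path records the current state in `irq_grp->napi_enabled` and only toggles NAPI on a real state change, making repeated enable/disable calls across resets harmless. The idempotent-toggle idiom:

```c
#include <stdbool.h>

struct ctx {
	bool enabled;
};

/* assumed callbacks that must not be invoked twice in a row */
void do_enable(struct ctx *c);
void do_disable(struct ctx *c);

static void safe_enable(struct ctx *c)
{
	if (!c->enabled) {	/* toggle only on a real state change */
		do_enable(c);
		c->enabled = true;
	}
}

static void safe_disable(struct ctx *c)
{
	if (c->enabled) {
		do_disable(c);
		c->enabled = false;
	}
}
```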
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 10366bbe9999..7367935ee68b 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -637,6 +637,8 @@ enum rx_msdu_start_pkt_type {
RX_MSDU_START_PKT_TYPE_11N,
RX_MSDU_START_PKT_TYPE_11AC,
RX_MSDU_START_PKT_TYPE_11AX,
+ RX_MSDU_START_PKT_TYPE_11BA,
+ RX_MSDU_START_PKT_TYPE_11BE,
};
enum rx_msdu_start_sgi {
@@ -1546,5 +1548,6 @@ struct hal_rx_desc {
#define RU_242 9
#define RU_484 18
#define RU_996 37
+#define RU_2X996 74
#endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 7a87777e0a04..9cd7ceae5a4f 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -2373,8 +2373,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
- arg->dwell_time_active_6g = 40;
- arg->dwell_time_passive_6g = 30;
+ arg->dwell_time_active_6g = 70;
+ arg->dwell_time_passive_6g = 70;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f9e77c4624d9..01e0dffbf57e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -647,7 +647,9 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
- of_get_mac_address(np, common->macaddr);
+ ret = of_get_mac_address(np, common->macaddr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 2821c27f317e..d06c724f63d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
}
/* 1) Prepare USB boot loader for runtime image */
- brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
+ err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
+ if (err)
+ goto fail;
rdlstate = le32_to_cpu(state.state);
rdlbytes = le32_to_cpu(state.bytes);
/* 2) Check we are in the Waiting state */
if (rdlstate != DL_WAITING) {
- brcmf_err("Failed to DL_START\n");
+ brcmf_err("Invalid DL state: %u\n", rdlstate);
err = -EINVAL;
goto fail;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index ab7c0f8d54f4..d3542af0f625 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.gen2 = true,
- .integrated = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
- .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const char iwl_br_name[] = "Intel(R) TBD Br device";
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6594216f873c..cd284767ff4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -2691,7 +2691,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
}
/* collect DRAM_IMR region in the last */
if (imr_reg_data.reg_tlv)
- size += iwl_dump_ini_mem(fwrt, list, &reg_data,
+ size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 434eed4130b9..91bcff499311 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#include "iwl-drv.h"
@@ -678,8 +678,10 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
struct uefi_cnv_var_eckv *data;
int ret = 0;
- data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
- "ECKV", sizeof(*data), NULL);
+ data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
+ &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
if (IS_ERR(data))
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 0c8943a8bd01..eb3c05417da3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -19,7 +19,7 @@
#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
-#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvCommonECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index be9e464c9b7b..3ff493e920d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -148,6 +148,7 @@
* during a FW error.
*/
#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101)
+#define CSR_FUNC_SCRATCH_POWER_OFF_MASK 0xFFFF
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 08d990ba8a79..ce787326aa69 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_update_drams(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
iwl_dbg_tlv_set_periodic_trigs(fwrt);
@@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 47854a36413e..75571f8693ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -21,6 +21,7 @@ struct iwl_trans_dev_restart_data {
struct list_head list;
unsigned int restart_count;
time64_t last_error;
+ bool backoff;
char name[];
};
@@ -125,13 +126,20 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
if (!data)
return at_least;
- if (ktime_get_boottime_seconds() - data->last_error >=
+ if (!data->backoff &&
+ ktime_get_boottime_seconds() - data->last_error >=
IWL_TRANS_RESET_OK_TIME)
data->restart_count = 0;
index = data->restart_count;
- if (index >= ARRAY_SIZE(escalation_list))
+ if (index >= ARRAY_SIZE(escalation_list)) {
index = ARRAY_SIZE(escalation_list) - 1;
+ if (!data->backoff) {
+ data->backoff = true;
+ return IWL_RESET_MODE_BACKOFF;
+ }
+ data->backoff = false;
+ }
return max(at_least, escalation_list[index]);
}
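
The reworked escalation logic keeps a per-device error counter indexed into an escalation list; once the counter runs off the end, a one-shot `backoff` flag returns a special mode that delays the next reset rather than escalating, and the same flag suppresses the quiet-period counter reset so the backoff actually runs its course. A standalone sketch of the state machine (mode names hypothetical):

```c
#include <stdbool.h>

enum mode { MODE_SW, MODE_HW, MODE_REPROBE, MODE_BACKOFF };

struct restart_state {
	unsigned int count;
	bool backoff;
	long last_error;
	long ok_window;		/* seconds of quiet that forgive errors */
};

static const enum mode escalation[] = { MODE_SW, MODE_SW, MODE_HW,
					MODE_REPROBE };

static enum mode next_mode(struct restart_state *s, long now)
{
	unsigned int n = sizeof(escalation) / sizeof(escalation[0]);
	unsigned int idx;

	/* A long quiet period forgives old errors -- unless we are in
	 * backoff, which must run its course first. */
	if (!s->backoff && now - s->last_error >= s->ok_window)
		s->count = 0;

	idx = s->count;
	if (idx >= n) {
		idx = n - 1;
		if (!s->backoff) {
			s->backoff = true;
			return MODE_BACKOFF;	/* delay, don't escalate */
		}
		s->backoff = false;
	}

	return escalation[idx];
}
```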
@@ -140,7 +148,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
static void iwl_trans_restart_wk(struct work_struct *wk)
{
- struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk);
+ struct iwl_trans *trans = container_of(wk, typeof(*trans),
+ restart.wk.work);
struct iwl_trans_reprobe *reprobe;
enum iwl_reset_mode mode;
@@ -168,6 +177,12 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
return;
mode = iwl_trans_determine_restart_mode(trans);
+ if (mode == IWL_RESET_MODE_BACKOFF) {
+ IWL_ERR(trans, "Too many device errors - delay next reset\n");
+ queue_delayed_work(system_unbound_wq, &trans->restart.wk,
+ IWL_TRANS_RESET_DELAY);
+ return;
+ }
iwl_trans_inc_restart_count(trans->dev);
@@ -227,7 +242,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->dev = dev;
trans->num_rx_queues = 1;
- INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk);
+ INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
return trans;
}
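
Converting `restart.wk` from `struct work_struct` to `struct delayed_work` changes the worker's `container_of()` path: a delayed_work embeds its work_struct in a `.work` member, so recovery goes through `restart.wk.work`. The nested-member recovery in plain C (kernel macro reproduced for the sketch):

```c
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct delayed_work { struct work work; long delay; };

struct trans {
	struct {
		struct delayed_work wk;
	} restart;
};

/* The callback still receives the inner struct work pointer, so the
 * container_of() path now needs the extra .work hop. */
static struct trans *trans_from_work(struct work *w)
{
	return container_of(w, struct trans, restart.wk.work);
}
```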
@@ -271,7 +286,7 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
- cancel_work_sync(&trans->restart.wk);
+ cancel_delayed_work_sync(&trans->restart.wk);
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -403,7 +418,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
- cancel_work_sync(&trans->restart.wk);
+ cancel_delayed_work_sync(&trans->restart.wk);
trans->op_mode = NULL;
@@ -540,7 +555,6 @@ void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
iwl_trans_pcie_release_nic_access(trans);
- __release(nic_access);
}
IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
@@ -641,6 +655,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx);
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
@@ -673,6 +690,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index f6234065dbdd..9c64e1fd4c09 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -958,7 +958,7 @@ struct iwl_trans {
struct iwl_dma_ptr invalid_tx_cmd;
struct {
- struct work_struct wk;
+ struct delayed_work wk;
struct iwl_fw_error_dump_mode mode;
bool during_reset;
} restart;
@@ -1159,7 +1159,7 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
*/
trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
&trans->status);
- queue_work(system_unbound_wq, &trans->restart.wk);
+ queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
}
static inline void iwl_trans_fw_error(struct iwl_trans *trans,
@@ -1258,6 +1258,9 @@ enum iwl_reset_mode {
IWL_RESET_MODE_RESCAN,
IWL_RESET_MODE_FUNC_RESET,
IWL_RESET_MODE_PROD_RESET,
+
+ /* keep last - special backoff value */
+ IWL_RESET_MODE_BACKOFF,
};
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index b26141c30c61..0a96c26e741b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -773,7 +773,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target.bssid = bssid;
target.cipher = cipher;
+ target.tk = NULL;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+
+ if (!WARN_ON(!target.tk))
+ memcpy(tk, target.tk, TK_11AZ_LEN);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));
}
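
The ftm-initiator fix initializes `target.tk` to NULL before handing the struct to `ieee80211_iter_keys()`, then copies the key only if the iterator actually populated it, with a WARN on the unexpected empty case. The init-iterate-check pattern, sketched (hypothetical names):

```c
#include <string.h>

struct target { const unsigned char *tk; };

/* assumed iterator: invokes fn for each stored key */
void iter_keys(void (*fn)(struct target *), struct target *t);

static int fetch_tk(unsigned char *out, size_t len,
		    void (*fn)(struct target *))
{
	struct target t = { .tk = NULL };	/* may stay NULL if no
						 * key matches */

	iter_keys(fn, &t);
	if (!t.tk)
		return -1;	/* the driver WARNs here instead */

	memcpy(out, t.tk, len);
	return 0;
}
```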
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index af6644b7e95f..e17ad647da48 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4099,6 +4099,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
return 0;
}
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ return;
+
+ mvmvif->ps_disabled = !vif->cfg.ps;
+
+ if (update)
+ iwl_mvm_power_update_mac(mvm);
+}
+
/* Common part for MLD and non-MLD modes */
int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4191,6 +4205,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_AUTHORIZED) {
ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta,
callbacks);
+ iwl_mvm_smps_workaround(mvm, vif, true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 341a2a7a49ec..78d7153a0cfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include "mvm.h"
@@ -887,6 +887,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_PS) {
+ iwl_mvm_smps_workaround(mvm, vif, false);
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ee769da72e68..211f542ec855 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -3043,4 +3043,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
bool is_ap);
+
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e0b657b2f74b..69cf46c79b4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -587,6 +587,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name),
@@ -1702,11 +1704,27 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Scratch value was altered, this means the device was powered off, we
* need to reset it completely.
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
- * so assume that any bits there mean that the device is usable.
+	 * but not bits [15:8]. So if any bits are set in the lower word,
+	 * assume the device is alive.
+ * For older devices, just try silently to grab the NIC.
*/
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
- !iwl_read32(trans, CSR_FUNC_SCRATCH))
- device_was_powered_off = true;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
+ CSR_FUNC_SCRATCH_POWER_OFF_MASK))
+ device_was_powered_off = true;
+ } else {
+		/*
+		 * BHs are re-enabled by iwl_trans_pcie_release_nic_access(),
+		 * so re-enable them here if _iwl_trans_pcie_grab_nic_access()
+		 * fails.
+		 */
+ local_bh_disable();
+ if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
+ iwl_trans_pcie_release_nic_access(trans);
+ } else {
+ device_was_powered_off = true;
+ local_bh_enable();
+ }
+ }
if (restore || device_was_powered_off) {
trans->state = IWL_TRANS_NO_FW;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45460f93d24a..114a9195ad7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -558,10 +558,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
struct device *dev);
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
-#define _iwl_trans_pcie_grab_nic_access(trans) \
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
+#define _iwl_trans_pcie_grab_nic_access(trans, silent) \
__cond_lock(nic_access_nobh, \
- likely(__iwl_trans_pcie_grab_nic_access(trans)))
+ likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
@@ -1105,7 +1105,8 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val);
bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index c917ed4c19bc..102a6123bba0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2351,7 +2351,8 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode)
struct iwl_trans_pcie_removal *removal;
char _msg = 0, *msg = &_msg;
- if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY))
+ if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
+ mode == IWL_RESET_MODE_BACKOFF))
return;
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
@@ -2405,7 +2406,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_reset);
* This version doesn't disable BHs but rather assumes they're
* already disabled.
*/
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2457,6 +2458,11 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+ if (silent) {
+ spin_unlock(&trans_pcie->reg_lock);
+ return false;
+ }
+
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
cntrl);
@@ -2488,7 +2494,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
bool ret;
local_bh_disable();
- ret = __iwl_trans_pcie_grab_nic_access(trans);
+ ret = __iwl_trans_pcie_grab_nic_access(trans, false);
if (ret) {
/* keep BHs disabled until iwl_trans_pcie_release_nic_access */
return ret;
@@ -2497,7 +2503,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
-void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+void __releases(nic_access_nobh)
+iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2524,6 +2531,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
* scheduled on different CPUs (after we drop reg_lock).
*/
out:
+ __release(nic_access_nobh);
spin_unlock_bh(&trans_pcie->reg_lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 7c1dd5cc084a..83c6fcafcf1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1021,7 +1021,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set (see above.)
*/
- if (!_iwl_trans_pcie_grab_nic_access(trans))
+ if (!_iwl_trans_pcie_grab_nic_access(trans, false))
return -EIO;
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 66f0f5377ac1..738bafc3749b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
+ chan_list->chan_scan_param[0].radio_type |=
+ CHAN_BW_40MHZ << 2;
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
-
+ }
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index 6a35c6ebd823..e7b839e74290 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -293,6 +293,7 @@ struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
kfree(mlink);
return ERR_PTR(ret);
}
+ rcu_assign_pointer(mvif->offchannel_link, mlink);
return mlink;
}
@@ -301,10 +302,12 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_data *mvif = mlink->mvif;
if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
return;
+ rcu_assign_pointer(mvif->offchannel_link, NULL);
dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
kfree(mlink);
}
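
The channel.c change publishes and retires `offchannel_link` with `rcu_assign_pointer()`. A userspace model of that publish/retire pattern using C11 atomics follows; it is only an approximation, since the kernel primitives also involve grace periods that plain atomics do not provide.

#include <stdatomic.h>
#include <stdio.h>

struct link { int idx; };

static _Atomic(struct link *) offchannel_link;

static void publish(struct link *l)
{
	/* rcu_assign_pointer() is, at minimum, a release store */
	atomic_store_explicit(&offchannel_link, l, memory_order_release);
}

static struct link *lookup(void)
{
	/* rcu_dereference() is, at minimum, an acquire/consume load */
	return atomic_load_explicit(&offchannel_link, memory_order_acquire);
}

int main(void)
{
	struct link l = { .idx = 3 };

	publish(&l);			/* mt76_get_vif_phy_link() side */
	struct link *cur = lookup();	/* reader side, e.g. write_txwi */
	if (cur)
		printf("using link %d\n", cur->idx);
	publish(NULL);			/* mt76_put_vif_phy_link() side */
	return 0;
}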
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 844af16ee551..35b4ec91979e 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -1011,6 +1011,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
int i;
mt76_worker_disable(&dev->tx_worker);
+ napi_disable(&dev->tx_napi);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 05651efb549e..e4ecd9cde36d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -351,6 +351,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 hw_key_idx2;
+ u8 offchannel:1;
u8 sta:1;
u8 sta_disabled:1;
u8 amsdu:1;
@@ -491,6 +492,7 @@ struct mt76_hw_cap {
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
#define MT_DRV_AMSDU_OFFLOAD BIT(5)
+#define MT_DRV_IGNORE_TXS_FAILED BIT(6)
struct mt76_driver_ops {
u32 drv_flags;
@@ -787,6 +789,7 @@ struct mt76_vif_link {
struct mt76_vif_data {
struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct mt76_vif_link __rcu *offchannel_link;
struct mt76_phy *roc_phy;
u16 valid_links;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index db0c29e65185..487ad716f872 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -314,6 +314,9 @@ enum tx_frag_idx {
#define MT_TXFREE_INFO_COUNT GENMASK(27, 24)
#define MT_TXFREE_INFO_STAT GENMASK(29, 28)
+#define MT_TXS_HDR_SIZE 4 /* Unit: DW */
+#define MT_TXS_SIZE 12 /* Unit: DW */
+
#define MT_TXS0_BW GENMASK(31, 29)
#define MT_TXS0_TID GENMASK(28, 26)
#define MT_TXS0_AMPDU BIT(25)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index d0e49d68c5db..bafcf5a279e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -391,7 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- !is_zero_ether_addr(link_conf->bssid)) {
+ link_conf && !is_zero_ether_addr(link_conf->bssid)) {
memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index b456ccd00d58..11c16d1fc70f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index b031c500b741..90e5666c0857 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 727bfdd00b40..2303019670e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2e_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index a4f4d12f904e..84ef80ab4afb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -30,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2u_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index b8cd7cd3d832..59fa812b30d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -348,14 +348,10 @@ mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
if (basic->hifsuspend) {
- if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
- basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
- /* success */
- dev->hif_idle = true;
- else
- /* busy */
- /* invalid */
- dev->hif_idle = false;
+ dev->hif_idle = true;
+ if (!(basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
+ basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE))
+ dev_info(dev->mt76.dev, "Hif traffic not idle.\n");
} else {
dev->hif_resumed = true;
}
@@ -631,6 +627,54 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
enable, false);
}
+static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
+{
+ struct {
+ u8 rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 addr;
+ __le32 valid;
+ u8 data[MT7925_EEPROM_BLOCK_SIZE];
+ } __packed req = {
+ .tag = cpu_to_le16(1),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .addr = cpu_to_le32(round_down(offset,
+ MT7925_EEPROM_BLOCK_SIZE)),
+ };
+ struct evt {
+ u8 rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ver;
+ __le32 addr;
+ __le32 valid;
+ __le32 size;
+ __le32 magic_num;
+ __le32 type;
+ __le32 rsv1[4];
+ u8 data[32];
+ } __packed *res;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct evt *)skb->data;
+ *val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE];
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
{
const struct mt76_connac2_fw_trailer *hdr;
@@ -639,13 +683,20 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
struct mt76_dev *mdev = &dev->mt76;
struct mt792x_phy *phy = &dev->phy;
const struct firmware *fw;
+ u8 *clc_base = NULL, hw_encap = 0;
int ret, i, len, offset = 0;
- u8 *clc_base = NULL;
if (mt7925_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+ }
+
ret = request_firmware(&fw, fw_name, mdev->dev);
if (ret)
return ret;
@@ -690,6 +741,10 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
if (phy->clc[clc->idx])
continue;
+ /* header content sanity */
+ if (u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
+
phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
le32_to_cpu(clc->len),
GFP_KERNEL);
@@ -1867,14 +1922,14 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
}
-
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
if (!info->enable) {
mt7925_mcu_sta_remove_tlv(skb);
mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
sizeof(struct tlv));
+ } else {
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
@@ -3239,6 +3294,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL))
+ uni_txd->option &= ~MCU_CMD_ACK;
+
goto exit;
}
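
`mt7925_mcu_read_eeprom()` above rounds the address down to a block boundary and indexes back into the returned block, because the firmware only serves whole blocks. A sketch of that access pattern with a fake backing store; the block size and helper are illustrative.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 16u

/* stand-in for the MCU query: fills one whole block */
static void read_block(uint32_t base, uint8_t *block)
{
	for (uint32_t i = 0; i < BLOCK_SIZE; i++)
		block[i] = (uint8_t)(base + i);	/* fake eeprom contents */
}

static uint8_t read_byte(uint32_t offset)
{
	uint8_t block[BLOCK_SIZE];
	uint32_t base = offset / BLOCK_SIZE * BLOCK_SIZE; /* round_down */

	read_block(base, block);
	return block[offset % BLOCK_SIZE];
}

int main(void)
{
	printf("0x%02x\n", read_byte(0xa71)); /* e.g. MT_EE_HW_TYPE */
	return 0;
}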
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index cb7b1a49fbd1..073e433069e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -167,9 +167,12 @@ enum mt7925_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
+ MT_EE_HW_TYPE = 0xa71,
__MT_EE_MAX = 0x9ff
};
+#define MT_EE_HW_TYPE_ENCAP GENMASK(1, 0)
+
enum {
TXPWR_USER,
TXPWR_EEPROM,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 019c925ae600..c7e833602733 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -832,7 +832,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif_link *mvif;
+ struct mt76_vif_link *mlink = NULL;
+ struct mt7996_vif *mvif;
u16 tx_count = 15;
u32 val;
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -840,11 +841,18 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
- mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
- if (mvif) {
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
- band_idx = mvif->band_idx;
+ if (vif) {
+ mvif = (struct mt7996_vif *)vif->drv_priv;
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = &mvif->deflink.mt76;
+ }
+
+ if (mlink) {
+ omac_idx = mlink->omac_idx;
+ wmm_idx = mlink->wmm_idx;
+ band_idx = mlink->band_idx;
}
if (inband_disc) {
@@ -910,13 +918,13 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
is_multicast_ether_addr(hdr->addr1);
u8 idx = MT7996_BASIC_RATES_TBL;
- if (mvif) {
- if (mcast && mvif->mcast_rates_idx)
- idx = mvif->mcast_rates_idx;
- else if (beacon && mvif->beacon_rates_idx)
- idx = mvif->beacon_rates_idx;
+ if (mlink) {
+ if (mcast && mlink->mcast_rates_idx)
+ idx = mlink->mcast_rates_idx;
+ else if (beacon && mlink->beacon_rates_idx)
+ idx = mlink->beacon_rates_idx;
else
- idx = mvif->basic_rates_idx;
+ idx = mlink->basic_rates_idx;
}
val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
@@ -984,8 +992,14 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = NULL;
+
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = &mvif->deflink.mt76;
- txp->fw.bss_idx = mvif->deflink.mt76.idx;
+ txp->fw.bss_idx = mlink->idx;
}
txp->fw.token = cpu_to_le16(id);
@@ -1399,7 +1413,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
mt7996_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
@@ -1442,7 +1456,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7996_mcu_rx_event(dev, skb);
break;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
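
The TXS loops above now walk the payload in named units: skip a header of MT_TXS_HDR_SIZE 32-bit words, then consume MT_TXS_SIZE-word records while a full record still fits before `end`. A small standalone version of the walk (sizes copied from the header above, buffer contents fake):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TXS_HDR_SIZE 4   /* unit: 32-bit words (DW) */
#define TXS_SIZE     12

static void parse_txs(const uint32_t *data, size_t ndw)
{
	const uint32_t *end = data + ndw;
	const uint32_t *rxd;

	/* only step into a record if all TXS_SIZE words are present */
	for (rxd = data + TXS_HDR_SIZE; rxd + TXS_SIZE <= end; rxd += TXS_SIZE)
		printf("txs record at dw %td\n", rxd - data);
}

int main(void)
{
	uint32_t buf[4 + 12 * 3] = { 0 };	/* header + three records */

	parse_txs(buf, sizeof(buf) / sizeof(buf[0]));
	return 0;
}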
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 69dd565d8319..b01cc7ef4799 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -249,6 +249,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
mlink->band_idx = band_idx;
mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
mlink->wcid = &link->sta.wcid;
+ mlink->wcid->offchannel = mlink->offchannel;
ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
if (ret)
@@ -601,6 +602,33 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, int *dbm)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct wireless_dev *wdev;
+ int n_chains, delta, i;
+
+ if (!phy) {
+ wdev = ieee80211_vif_to_wdev(vif);
+ for (i = 0; i < hw->wiphy->n_radio; i++)
+ if (wdev->radio_mask & BIT(i))
+ phy = dev->radio_phy[i];
+
+ if (!phy)
+ return -EINVAL;
+ }
+
+ n_chains = hweight16(phy->mt76->chainmask);
+ delta = mt76_tx_power_nss_delta(n_chains);
+ *dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
+
+ return 0;
+}
+
static u8
mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
@@ -1650,7 +1678,7 @@ const struct ieee80211_ops mt7996_ops = {
.remain_on_channel = mt76_remain_on_channel,
.cancel_remain_on_channel = mt76_cancel_remain_on_channel,
.release_buffered_frames = mt76_release_buffered_frames,
- .get_txpower = mt76_get_txpower,
+ .get_txpower = mt7996_get_txpower,
.channel_switch_beacon = mt7996_channel_switch_beacon,
.get_stats = mt7996_get_stats,
.get_et_sset_count = mt7996_get_et_sset_count,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 43468bcaffc6..a75e1c9435bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -908,7 +908,8 @@ enum {
UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE,
UNI_CMD_SER_SET_RECOVER_L3_BF,
UNI_CMD_SER_SET_RECOVER_L4_MDP,
- UNI_CMD_SER_SET_RECOVER_FULL,
+ UNI_CMD_SER_SET_RECOVER_FROM_ETH,
+ UNI_CMD_SER_SET_RECOVER_FULL = 8,
UNI_CMD_SER_SET_SYSTEM_ASSERT,
/* action */
UNI_CMD_SER_ENABLE = 1,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 7a8ee6c75cf2..9d37f8238746 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
return -EBUSY;
- ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
+ ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH,
mphy->band_idx);
if (ret)
goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
index 1c4f9deaaada..9b20ccbeb8cf 100644
--- a/drivers/net/wireless/mediatek/mt76/scan.c
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -52,11 +52,6 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
ether_addr_copy(hdr->addr3, req->bssid);
}
- info = IEEE80211_SKB_CB(skb);
- if (req->no_cck)
- info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
- info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
-
if (req->ie_len)
skb_put_data(skb, req->ie, req->ie_len);
@@ -64,10 +59,20 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
rcu_read_lock();
- if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL))
- mt76_tx(phy, NULL, mvif->wcid, skb);
- else
+
+ if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) {
ieee80211_free_txskb(phy->hw, skb);
+ goto out;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ mt76_tx(phy, NULL, mvif->wcid, skb);
+
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af0c50c983ec..513916469ca2 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
return;
/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
- if (flags & MT_TX_CB_TXS_FAILED) {
+ if (flags & MT_TX_CB_TXS_FAILED &&
+ (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index eae93efa6150..82d1bf7edba2 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -102,7 +102,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
void plfxlc_mac_release(struct plfxlc_mac *mac)
{
plfxlc_chip_release(&mac->chip);
- lockdep_assert_held(&mac->lock);
}
int plfxlc_op_start(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 4ce0c05c5129..569856ca677f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
return len;
write_error:
- dev_info(&udev->dev,
- "%s: Failed to write block at addr: %04x size: %04x\n",
- __func__, addr, blocksize);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev,
+ "%s: Failed to write block at addr: %04x size: %04x\n",
+ __func__, addr, blocksize);
return -EAGAIN;
}
@@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
*/
rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
- ret = rtl8xxxu_download_firmware(priv);
- dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ for (int retry = 5; retry >= 0 ; retry--) {
+ ret = rtl8xxxu_download_firmware(priv);
+ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ if (ret != -EAGAIN)
+ break;
+ if (retry)
+ dev_dbg(dev, "%s: retry firmware download\n", __func__);
+ }
if (ret)
goto exit;
ret = rtl8xxxu_start_firmware(priv);
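
The rtl8xxxu change retries only the transient failure. The shape of that bounded retry loop, with a fake download that fails twice; names are illustrative.

#include <errno.h>
#include <stdio.h>

static int attempts;

static int download_firmware(void)
{
	return ++attempts < 3 ? -EAGAIN : 0;	/* fake transient failures */
}

int main(void)
{
	int ret;

	for (int retry = 5; retry >= 0; retry--) {
		ret = download_firmware();
		if (ret != -EAGAIN)
			break;		/* success or a permanent error */
		if (retry)
			fprintf(stderr, "retrying firmware download\n");
	}
	printf("ret = %d after %d attempts\n", ret, attempts);
	return ret ? 1 : 0;
}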
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 02389b7c6876..6b563ac489a7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -735,6 +735,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
bool disable_pt = true;
+ u32 mask_hi;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
@@ -755,6 +756,20 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
si->init_ra_lv = 0;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+ if (rtwdev->chip->id != RTW_CHIP_TYPE_8814A)
+ return;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO_HI);
+
+ mask_hi = si->ra_mask >> 32;
+
+ SET_RA_INFO_RA_MASK0(h2c_pkt, (mask_hi & 0xff));
+ SET_RA_INFO_RA_MASK1(h2c_pkt, (mask_hi & 0xff00) >> 8);
+ SET_RA_INFO_RA_MASK2(h2c_pkt, (mask_hi & 0xff0000) >> 16);
+ SET_RA_INFO_RA_MASK3(h2c_pkt, (mask_hi & 0xff000000) >> 24);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
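
For RTW_CHIP_TYPE_8814A the 64-bit rate mask no longer fits one H2C message, so the hunk above sends the high word in a second H2C_CMD_RA_INFO_HI frame. A sketch of the byte-by-byte split it uses:

#include <stdint.h>
#include <stdio.h>

static void set_mask_bytes(uint8_t pkt[4], uint32_t mask)
{
	/* mirrors the four SET_RA_INFO_RA_MASKn() field writes */
	pkt[0] = mask & 0xff;
	pkt[1] = (mask & 0xff00) >> 8;
	pkt[2] = (mask & 0xff0000) >> 16;
	pkt[3] = (mask & 0xff000000) >> 24;
}

int main(void)
{
	uint64_t ra_mask = 0x000000ff0ff01ff0ULL;
	uint8_t lo[4], hi[4];

	set_mask_bytes(lo, (uint32_t)ra_mask);		/* H2C_CMD_RA_INFO */
	set_mask_bytes(hi, (uint32_t)(ra_mask >> 32));	/* H2C_CMD_RA_INFO_HI */
	printf("lo %02x %02x %02x %02x, hi %02x %02x %02x %02x\n",
	       lo[0], lo[1], lo[2], lo[3], hi[0], hi[1], hi[2], hi[3]);
	return 0;
}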
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 404de1b0c407..48ad9ceab6ea 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -557,6 +557,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_DEFAULT_PORT 0x2c
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_RA_INFO_HI 0x46
#define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cae9cca6dca3..0491f501c138 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -291,6 +291,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
if (rtw_read8(rtwdev, REG_CR) == 0xea)
cur_pwr = false;
else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ chip->id != RTW_CHIP_TYPE_8814A &&
(rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
cur_pwr = false;
else
@@ -784,7 +785,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
if (!check_firmware_size(data, size))
return -EINVAL;
- if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
return -EBUSY;
wlan_cpu_enable(rtwdev, false);
@@ -802,7 +804,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
wlan_cpu_enable(rtwdev, true);
- if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
ret = -EBUSY;
goto dlfw_fail;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 0cee0fd8c0ef..9b9e76eebce9 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -1234,7 +1234,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
} else if (sta->deflink.ht_cap.ht_supported) {
- ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 36) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 28) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
@@ -1244,6 +1246,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+ else if (efuse->hw_cap.nss == 2)
+ ra_mask &= RA_MASK_VHT_RATES_2SS | RA_MASK_HT_RATES_2SS |
+ RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
@@ -1302,10 +1307,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
break;
}
- if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000)
- tx_num = 2;
- else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000)
- tx_num = 2;
+ if (sta->deflink.vht_cap.vht_supported ||
+ sta->deflink.ht_cap.ht_supported)
+ tx_num = efuse->hw_cap.nss;
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
@@ -1561,6 +1565,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
+ int i;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1580,25 +1585,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = chip->ampdu_density;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (efuse->hw_cap.nss > 1) {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(300);
- } else {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0x00;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(150);
- }
+
+ for (i = 0; i < efuse->hw_cap.nss; i++)
+ ht_cap->mcs.rx_mask[i] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss);
}
static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
- u16 mcs_map;
+ u16 mcs_map = 0;
__le16 highest;
+ int i;
if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
@@ -1621,21 +1621,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
if (rtw_chip_has_rx_ldpc(rtwdev))
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
- if (efuse->hw_cap.nss > 1) {
- highest = cpu_to_le16(780);
- mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
- } else {
- highest = cpu_to_le16(390);
- mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
+ for (i = 0; i < 8; i++) {
+ if (i < efuse->hw_cap.nss)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
+ highest = cpu_to_le16(390 * efuse->hw_cap.nss);
+
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.rx_highest = highest;
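
The VHT rework above derives the MCS map from the stream count instead of hardcoding the one- and two-stream cases. A worked version of the loop; VHT MCS map fields are two bits per stream, and the values 2 (MCS 0-9) and 3 (not supported) match the IEEE80211_VHT_MCS_* constants.

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9   2
#define VHT_MCS_NOT_SUPPORTED 3

int main(void)
{
	int nss = 2;
	uint16_t mcs_map = 0;

	for (int i = 0; i < 8; i++)
		mcs_map |= (i < nss ? VHT_MCS_SUPPORT_0_9
				    : VHT_MCS_NOT_SUPPORTED) << (i * 2);

	/* nss = 2 -> 0xfffa: two 10b fields, then six 11b fields */
	printf("mcs_map = 0x%04x, highest = %u Mbps\n", mcs_map, 390 * nss);
	return 0;
}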
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 62cd4c526301..a61ea853f98d 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -191,6 +191,7 @@ enum rtw_chip_type {
RTW_CHIP_TYPE_8703B,
RTW_CHIP_TYPE_8821A,
RTW_CHIP_TYPE_8812A,
+ RTW_CHIP_TYPE_8814A,
};
enum rtw_tx_queue_type {
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e438405fba56..209b6fc08a73 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -130,6 +130,7 @@
#define BIT_SHIFT_ROM_PGE 16
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
+#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13))
#define BIT_RPWM_TOGGLE BIT(7)
#define BIT_RAM_DL_SEL BIT(7) /* legacy only */
#define BIT_DMEM_CHKSUM_OK BIT(6)
@@ -147,7 +148,7 @@
BIT_CHECK_SUM_OK)
#define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \
BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
-#define FW_READY_MASK 0xffff
+#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL)
#define REG_MCU_TST_CFG 0x84
#define VAL_FW_TRIGGER 0x1
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 7f03903ddf4b..23a29019752d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -935,11 +935,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
}
static void
-rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
+ u8 rs, u32 *phy_pwr_idx)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
- static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
@@ -947,12 +947,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
- phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
- phy_pwr_idx);
- phy_pwr_idx = 0;
+ *phy_pwr_idx);
+ *phy_pwr_idx = 0;
}
}
}
@@ -960,11 +960,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u32 phy_pwr_idx = 0;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
- rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
+ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs,
+ &phy_pwr_idx);
}
}
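
The rtw8822b fix above replaces a function-local `static` accumulator with caller-owned state, so a partially assembled register word can no longer leak between independent call sequences. A minimal model of the fixed version:

#include <stdint.h>
#include <stdio.h>

static void accumulate(uint32_t *acc, unsigned int shift, uint8_t byte,
		       int flush)
{
	*acc |= (uint32_t)byte << (shift * 8);	/* pack one byte per rate */
	if (flush) {
		printf("write 0x%08x\n", *acc);
		*acc = 0;	/* reset only after the full word went out */
	}
}

int main(void)
{
	uint32_t acc = 0;	/* per-sequence state, as in the fix */

	for (unsigned int s = 0; s < 4; s++)
		accumulate(&acc, s, 0x11 * (s + 1), s == 3);
	return 0;	/* prints "write 0x44332211" */
}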
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index e222d3c01a77..66819f694405 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
*nss = 4;
*mcs = rate - DESC_RATEVHT4SS_MCS0;
} else if (rate >= DESC_RATEMCS0 &&
- rate <= DESC_RATEMCS15) {
+ rate <= DESC_RATEMCS31) {
+ *nss = 0;
*mcs = rate - DESC_RATEMCS0;
}
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 68316d44b204..86e8f78ec94a 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
[CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
- [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
+ [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@@ -1372,11 +1372,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtcrpt == 8) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
- break;
} else if (ver->fcxbtcrpt == 7) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
- break;
} else {
goto err;
}
@@ -4589,17 +4587,16 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap || bt_linfo->slave_role)
+ dm->slot_dur[CXST_W1] = 20;
+ else
+ dm->slot_dur[CXST_W1] = 40;
+
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP);
@@ -4609,15 +4606,10 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */
case BTC_WLINKING: /* wl-connecting + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
@@ -4645,7 +4637,10 @@ static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */
- _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DPSINK);
+ else
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */
_set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK);
@@ -4699,21 +4694,20 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap || bt_linfo->slave_role)
+ dm->slot_dur[CXST_W1] = 20;
+ else
+ dm->slot_dur[CXST_W1] = 40;
+
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
- _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID);
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */
@@ -4721,15 +4715,10 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID */
case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP_HID);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP_HID);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
}
}
@@ -5408,7 +5397,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ if (btc->cx.state_map != BTC_WLINKING &&
+ RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_action_wl_25g_mcc(rtwdev);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
} else if (rtwdev->dbcc_en) {
@@ -7002,7 +6992,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
goto exit;
}
- if (wl->status.val & btc_scanning_map.val) {
+ if (wl->status.val & btc_scanning_map.val && !wl->rfk_info.con_rfk) {
_action_wl_scan(rtwdev);
bt->scan_rx_low_pri = true;
goto exit;
@@ -7223,6 +7213,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx)
_fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
}
+ btc->dm.tdma_instant_excute = 1;
+
_run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH);
}
@@ -7671,7 +7663,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
else
wl->status.map.connecting = 0;
- if (state == BTC_ROLE_MSTS_STA_DIS_CONN)
+ if (state == BTC_ROLE_MSTS_STA_DIS_CONN ||
+ state == BTC_ROLE_MSTS_STA_CONN_END)
wl->status.map._4way = false;
_run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO);
@@ -8115,6 +8108,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
func = rtw89_btc_c2h_get_index_by_ver(rtwdev, func);
+ pfwinfo->cnt_c2h++;
switch (func) {
case BTF_EVNT_BUF_OVERFLOW:
@@ -11037,3 +11031,24 @@ out:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n",
(int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code);
}
+
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms)
+{
+ struct rtw89_btc_bt_link_info *bt_linfo = &rtwdev->btc.cx.bt.link_info;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return;
+
+ if (!a2dp.exist)
+ return;
+
+ fsleep(ms * 1000);
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_preserve_bt_time);
+
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state)
+{
+ rtwdev->btc.cx.wl.rfk_info.con_rfk = state;
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_conn_rfk);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index dbdb56e063ef..757d03675cf4 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -290,6 +290,8 @@ void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms);
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 85f739f1173d..422cc3867f3b 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -2381,6 +2381,49 @@ static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
}
+static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ size_t hdr_len, ielen;
+ u8 *variable;
+ int chan;
+
+ if (!rtwdev->chip->rx_freq_frome_ie)
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ variable = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ variable = mgmt->u.probe_resp.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ return;
+ }
+
+ if (skb->len > hdr_len)
+ ielen = skb->len - hdr_len;
+ else
+ return;
+
+	/* The IE parsing is the same for the 2 GHz and 5 GHz bands, so
+	 * passing NL80211_BAND_2GHZ below covers both.
+	 */
+ chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ);
+ if (chan == -1)
+ return;
+
+ rx_status->band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+ rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band);
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2398,6 +2441,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
rtw89_core_validate_rx_signal(rx_status);
+ rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -5042,8 +5086,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
rtw89_hci_mac_pre_deinit(rtwdev);
- rtw89_mac_pwr_off(rtwdev);
-
return 0;
}
@@ -5124,36 +5166,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
rtw89_read_chip_ver(rtwdev);
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to power on\n");
+ return ret;
+ }
+
ret = rtw89_wait_firmware_completion(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to wait firmware completion\n");
- return ret;
+ goto out;
}
ret = rtw89_fw_recognize(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_efuse_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
ret = rtw89_fw_recognize_elements(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware elements\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_board_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
rtw89_core_setup_rfe_parms(rtwdev);
rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
- return 0;
+out:
+ rtw89_mac_pwr_off(rtwdev);
+
+ return ret;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);
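
`rtw89_core_update_rx_freq_from_ie()` above recovers the channel from beacon/probe-response IEs via cfg80211_get_ies_channel_number(). A userspace sketch of the 2 GHz path of that helper, reduced to the DS Parameter Set element (the kernel version also consults HT/HE operation elements):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_DS_PARAMS 3

static int ies_channel_number(const uint8_t *ies, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		uint8_t id = ies[pos], elen = ies[pos + 1];

		if (pos + 2 + elen > len)
			break;	/* truncated element */
		if (id == WLAN_EID_DS_PARAMS && elen >= 1)
			return ies[pos + 2];
		pos += 2 + elen;
	}
	return -1;
}

int main(void)
{
	const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't',  /* SSID */
				3, 1, 11 };		   /* DS params */

	printf("channel %d\n", ies_channel_number(ies, sizeof(ies)));
	return 0;
}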
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 93e41def81b4..963f0046f0bc 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,6 +17,7 @@ struct rtw89_dev;
struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
+struct rtw89_fw_blacklist;
struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
@@ -1761,7 +1762,8 @@ struct rtw89_btc_wl_rfk_info {
u32 phy_map: 2;
u32 band: 2;
u32 type: 8;
- u32 rsvd: 14;
+ u32 con_rfk: 1;
+ u32 rsvd: 13;
u32 start_time;
u32 proc_time;
@@ -4251,6 +4253,7 @@ struct rtw89_chip_info {
bool try_ce_fw;
u8 bbmcu_nr;
u32 needed_fw_elms;
+ const struct rtw89_fw_blacklist *fw_blacklist;
u32 fifo_size;
bool small_fifo_size;
u32 dle_scc_rsvd_size;
@@ -4273,6 +4276,7 @@ struct rtw89_chip_info {
bool support_ant_gain;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
+ bool rx_freq_frome_ie;
bool hw_sec_hdr;
bool hw_mgmt_tx_encrypt;
u8 rf_path_num;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 2f3869c70069..1fbcba718998 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -38,6 +38,16 @@ struct rtw89_arp_rsp {
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
+ .ver = 0x00,
+ .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+};
+EXPORT_SYMBOL(rtw89_fw_blacklist_default);
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -314,7 +324,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
if (!sec->secure_boot)
goto out;
- sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
goto ignore;
@@ -344,6 +354,46 @@ ignore:
return 0;
}
+static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const void *content)
+{
+ const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u8 byte_idx;
+ u8 bit_mask;
+
+ if (!sec->secure_boot)
+ return 0;
+
+ if (!info->secure_section_exist || section_info->ignore)
+ return 0;
+
+ if (!chip_blacklist) {
+ rtw89_err(rtwdev, "chip no blacklist for secure firmware\n");
+ return -ENOENT;
+ }
+
+ byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
+ bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
+
+ if (section_content->blacklist.ver > chip_blacklist->ver) {
+ rtw89_err(rtwdev, "chip blacklist out of date (%u, %u)\n",
+ section_content->blacklist.ver, chip_blacklist->ver);
+ return -EINVAL;
+ }
+
+ if (chip_blacklist->list[byte_idx] & bit_mask) {
+ rtw89_err(rtwdev, "firmware %u in chip blacklist\n",
+ section_content->blacklist.ver);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
static int __parse_security_section(struct rtw89_dev *rtwdev,
struct rtw89_fw_bin_info *info,
struct rtw89_fw_hdr_section_info *section_info,
@@ -374,7 +424,7 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
info->secure_section_exist = true;
}
- return 0;
+ return __check_secure_blacklist(rtwdev, info, section_info, content);
}
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
@@ -489,6 +539,30 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
}
}
+static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
+ const struct firmware *firmware,
+ const struct rtw89_mfw_hdr *mfw_hdr)
+{
+ const void *mfw = firmware->data;
+ u32 mfw_len = firmware->size;
+ u8 fw_nr = mfw_hdr->fw_nr;
+ const void *ptr;
+
+ if (fw_nr == 0) {
+ rtw89_err(rtwdev, "mfw header has no fw entry\n");
+ return -ENOENT;
+ }
+
+ ptr = &mfw_hdr->info[fw_nr];
+
+ if (ptr > mfw + mfw_len) {
+ rtw89_err(rtwdev, "mfw header out of address\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
struct rtw89_fw_suit *fw_suit, bool nowarn)
@@ -499,6 +573,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
u32 mfw_len = firmware->size;
const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
+ int ret;
int i;
if (mfw_hdr->sig != RTW89_MFW_SIG) {
@@ -511,6 +586,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
for (i = 0; i < mfw_hdr->fw_nr; i++) {
tmp = &mfw_hdr->info[i];
if (tmp->type != type)
@@ -540,6 +619,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
found:
fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
fw_suit->size = le32_to_cpu(mfw_info->size);
+
+ if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
+ rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
+ return -EFAULT;
+ }
+
return 0;
}
@@ -551,12 +636,17 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
(const struct rtw89_mfw_hdr *)firmware->data;
const struct rtw89_mfw_info *mfw_info;
u32 size;
+ int ret;
if (mfw_hdr->sig != RTW89_MFW_SIG) {
rtw89_warn(rtwdev, "not mfw format\n");
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
@@ -1322,7 +1412,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -1409,7 +1498,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -3281,9 +3369,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
- h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
+ le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
CCTLINFO_G7_W6_ULDL);
- h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
if (rtwsta_link) {
h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
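
`__check_secure_blacklist()` above treats the chip blacklist as a 256-bit bitmap stored in 32 bytes and indexes it with the firmware's `bit_in_chip_list`. A standalone version of that lookup:

#include <stdint.h>
#include <stdio.h>

struct fw_blacklist {
	uint8_t ver;
	uint8_t list[32];	/* 256 revocation bits */
};

static int fw_blacklisted(const struct fw_blacklist *bl, uint8_t bit)
{
	uint8_t byte_idx = bit >> 3;		/* which byte */
	uint8_t bit_mask = 1u << (bit & 0x7);	/* which bit in it */

	return (bl->list[byte_idx] & bit_mask) != 0;
}

int main(void)
{
	struct fw_blacklist bl = { .ver = 1 };

	bl.list[5] |= 1u << 2;			/* revoke bit 42 */
	printf("%d %d\n", fw_blacklisted(&bl, 42), fw_blacklisted(&bl, 7));
	return 0;
}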
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2026bc2fd2ac..ee2be09bd3db 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -664,6 +664,11 @@ struct rtw89_fw_mss_pool_hdr {
union rtw89_fw_section_mssc_content {
struct {
+ u8 pad[0x20];
+ u8 bit_in_chip_list;
+ u8 ver;
+ } __packed blacklist;
+ struct {
u8 pad[58];
__le32 v;
} __packed sb_sel_ver;
@@ -673,6 +678,13 @@ union rtw89_fw_section_mssc_content {
} __packed key_sign_len;
} __packed;
+struct rtw89_fw_blacklist {
+ u8 ver;
+ u8 list[32];
+};
+
+extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default;
+
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index a37c6d525d6f..def12dbfe48d 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1495,6 +1495,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
#undef PWR_ACT
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret) {
+ rtw89_mac_power_switch(rtwdev, false);
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
{
rtw89_mac_power_switch(rtwdev, false);
@@ -3996,14 +4011,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
{
int ret;
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret) {
- rtw89_mac_power_switch(rtwdev, false);
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret)
- return ret;
- }
-
rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
if (include_bb) {
@@ -4036,6 +4043,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
bool include_bb = !!chip->bbmcu_nr;
int ret;
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret)
+ return ret;
+
ret = rtw89_mac_partial_init(rtwdev, include_bb);
if (ret)
goto fail;
@@ -4067,7 +4078,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
return ret;
fail:
- rtw89_mac_power_switch(rtwdev, false);
+ rtw89_mac_pwr_off(rtwdev);
return ret;
}
@@ -4826,6 +4837,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct ieee80211_bss_conf *bss_conf;
+ bool set;
+ u32 reg;
+
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
+ return;
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ set = bss_conf->he_support && !bss_conf->eht_support;
+
+ rcu_read_unlock();
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL,
+ rtwvif_link->mac_idx);
+
+ if (set)
+ rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+ else
+ rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+}
+
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 8edea96d037f..71574dbd8764 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -1145,6 +1145,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
rtw89_write32_set(rtwdev, reg, bit);
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev);
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb);
int rtw89_mac_init(struct rtw89_dev *rtwdev);
@@ -1185,6 +1186,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index b3669e0074df..7c9b53a9ba3b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -670,6 +670,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev,
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
rtw89_mac_port_update(rtwdev, rtwvif_link);
rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link);
+ rtw89_mac_set_he_tb(rtwdev, rtwvif_link);
}
static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 10d0efa7a58e..850ae5bf50ef 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -7095,6 +7095,10 @@
#define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3)
#define B_BE_MACLBK_EN BIT(0)
+#define R_BE_CLIENT_OM_CTRL 0x11040
+#define R_BE_CLIENT_OM_CTRL_C1 0x15040
+#define B_BE_TRIG_DIS_EHTTB BIT(24)
+
#define R_BE_WMAC_NAV_CTL 0x11080
#define R_BE_WMAC_NAV_CTL_C1 0x15080
#define B_BE_WMAC_NAV_UPPER_EN BIT(26)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 80b2f74589eb..5b8d95c90d73 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -720,6 +720,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw89_dev *rtwdev = hw->priv;
+ wiphy_lock(wiphy);
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
@@ -737,6 +738,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
exit:
mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(wiphy);
}
/* Maximum Transmit Power field (@raw) can be EIRP or PSD.
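Both this hunk and the ser.c hunks later in this series apply the same nesting; a minimal sketch of the ordering (locks as named in the hunks):

	/* Lock-ordering sketch: take the wiphy lock strictly outside the
	 * driver mutex, matching mac80211 callers and preventing an
	 * inverted acquisition order between the two paths. */
	wiphy_lock(wiphy);
	mutex_lock(&rtwdev->mutex);
	/* ... driver work ... */
	mutex_unlock(&rtwdev->mutex);
	wiphy_unlock(wiphy);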
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index c56f70267882..546e88cd4649 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -1596,10 +1596,16 @@ static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
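The same coexistence bracket is applied to rtw8852a, rtw8852b, rtw8852bt and rtw8852c below; condensed shape of the hunk (calls as in the diff, placeholder comments ours):

	rtw89_btc_ntfy_conn_rfk(rtwdev, true);		/* RFK begins */

	/* rx_dck + iqk */
	rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);	/* 30 ms BT window */
	/* tssi */
	rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
	/* dpk */

	rtw89_btc_ntfy_conn_rfk(rtwdev, false);		/* RFK done */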
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2445,6 +2451,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -2485,6 +2492,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_ant_gain = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.rf_path_num = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 9bd2842c27d5..e12e5bd402e5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1356,10 +1356,16 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2162,6 +2168,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2203,6 +2210,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index dfb2bf61b0b8..ab9365b0ec08 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -568,10 +568,16 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -798,6 +804,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -839,6 +846,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index bde3e1fb7ca6..412e633944f3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -541,10 +541,16 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -732,6 +738,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -772,6 +779,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.support_ant_gain = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
.rf_path_num = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index bc84b15e7826..cd68f6cbeecb 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1853,10 +1853,16 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852c_rx_dck(rtwdev, phy_idx, false);
rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
@@ -2954,6 +2960,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2998,6 +3005,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_ant_gain = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 11d66bfceb15..2696fdf350f6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2721,6 +2721,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 1,
.needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 589824,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2763,6 +2764,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.support_ant_gain = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
.rf_path_num = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 26a944d3b672..d0c8584308c0 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -156,9 +156,11 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
+ wiphy_lock(rtwdev->hw->wiphy);
mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -708,9 +710,11 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
+ wiphy_lock(rtwdev->hw->wiphy);
mutex_lock(&rtwdev->mutex);
ser_l2_reset_st_pre_hdl(ser);
mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ieee80211_restart_hw(rtwdev->hw);
ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf6a331d4042..a68530344d20 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
/*
@@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (sta && sta->mlo) {
- if (WARN_ON(!link_sta)) {
- ieee80211_free_txskb(hw, skb);
- return;
- }
+ /* Do address translations only between shared links. It is
* possible that while a non-AP MLD station and an AP MLD
+ * station have shared links, the frame is intended to be sent
+ * on a link which is not shared (for example when sending a
+ * probe response).
+ */
+ if (sta && sta->mlo && link_sta) {
/* address translation to link addresses on TX */
ether_addr_copy(hdr->addr1, link_sta->addr);
ether_addr_copy(hdr->addr2, bss_conf->addr);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 63fe51d0e64d..809b407cece1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -985,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_TX:
- get_page(pdata);
xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ trace_xdp_exception(queue->info->netdev, prog, act);
+ break;
+ }
+ get_page(pdata);
err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
- if (unlikely(!err))
+ if (unlikely(err <= 0)) {
+ if (err < 0)
+ trace_xdp_exception(queue->info->netdev, prog, act);
xdp_return_frame_rx_napi(xdpf);
- else if (unlikely(err < 0))
- trace_xdp_exception(queue->info->netdev, prog, act);
+ }
break;
case XDP_REDIRECT:
get_page(pdata);
err = xdp_do_redirect(queue->info->netdev, xdp, prog);
*need_xdp_flush = true;
- if (unlikely(err))
+ if (unlikely(err)) {
trace_xdp_exception(queue->info->netdev, prog, act);
+ xdp_return_buff(xdp);
+ }
break;
case XDP_PASS:
case XDP_DROP:
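A toy refcount model of the XDP_TX ordering change (standalone C; every name is invented for illustration, this is not driver code):

#include <stdio.h>
#include <stdbool.h>

static int page_ref = 1;
static bool convert_ok;	/* stands in for xdp_convert_buff_to_frame() */

static void tx_old(void)
{
	page_ref++;		/* get_page() before converting ... */
	if (!convert_ok)
		return;		/* ... leaks the reference on failure */
	page_ref--;		/* frame sent or returned */
}

static void tx_new(void)
{
	if (!convert_ok)
		return;		/* nothing taken, nothing to leak */
	page_ref++;
	page_ref--;
}

int main(void)
{
	tx_old();
	printf("old ordering: ref=%d (leaked)\n", page_ref);	/* 2 */
	page_ref = 1;
	tx_new();
	printf("new ordering: ref=%d\n", page_ref);		/* 1 */
	return 0;
}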
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index d687e8c2cc78..63ceed89b62e 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1318,6 +1318,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index 544d8a4d2af5..f27df8d7f3b9 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
unsigned char *mw_cnt)
{
- struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
+ struct idt_mw_cfg *mws;
const struct idt_ntb_bar *bars;
enum idt_mw_type mw_type;
unsigned char widx, bidx, en_cnt;
@@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
int aprt_size;
u32 data;
+ mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS,
+ sizeof(*mws), GFP_KERNEL);
+ if (!mws)
+ return ERR_PTR(-ENOMEM);
+
/* Retrieve the array of the BARs registers */
bars = portdata_tbl[port].bars;
@@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
}
}
- /* Allocate memory for memory window descriptors */
- ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
- GFP_KERNEL);
- if (!ret_mws)
- return ERR_PTR(-ENOMEM);
-
- /* Copy the info of detected memory windows */
- memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
-
- return ret_mws;
+ return mws;
}
/*
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 082253a3a956..04f4a049599a 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
if (ndd->data)
return 0;
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
+ ndd->nsarea.config_size == 0) {
dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
ndd->nsarea.max_xfer, ndd->nsarea.config_size);
return -ENXIO;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 486afe598184..09ed1f61c9a8 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -97,6 +97,7 @@ config NVME_TCP_TLS
depends on NVME_TCP
select NET_HANDSHAKE
select KEYS
+ select TLS
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8359d0aa0e44..8863c9fcb4aa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2059,7 +2059,21 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
- atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
+ atomic_bs = (1 + ns->ctrl->awupf) * bs;
+
+ /*
+ * Set subsystem atomic bs.
+ */
+ if (ns->ctrl->subsys->atomic_bs) {
+ if (atomic_bs != ns->ctrl->subsys->atomic_bs) {
+ dev_err_ratelimited(ns->ctrl->device,
+ "%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n",
+ ns->disk ? ns->disk->disk_name : "?",
+ ns->ctrl->subsys->atomic_bs,
+ atomic_bs);
+ }
+ } else
+ ns->ctrl->subsys->atomic_bs = atomic_bs;
nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
}
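A toy model of the new subsystem-wide consistency rule (standalone C, numbers invented):

#include <stdio.h>

static unsigned int subsys_atomic_bs;	/* 0 until the first namespace */

static int ns_validate(unsigned int atomic_bs)
{
	if (!subsys_atomic_bs) {
		subsys_atomic_bs = atomic_bs;	/* first NS publishes it */
		return 0;
	}
	return atomic_bs == subsys_atomic_bs ? 0 : -1;	/* later must match */
}

int main(void)
{
	printf("%d ", ns_validate(4096));	/* 0: publishes 4096 */
	printf("%d ", ns_validate(4096));	/* 0: consistent */
	printf("%d\n", ns_validate(8192));	/* -1: rejected */
	return 0;
}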
@@ -2201,6 +2215,17 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
capacity = 0;
+
+ /*
+ * Validate the max atomic write size fits within the subsystem's
+ * atomic write capabilities.
+ */
+ if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) {
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
+ ret = -ENXIO;
+ goto out;
+ }
+
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
@@ -3031,7 +3056,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
kfree(subsys);
return -EINVAL;
}
- subsys->awupf = le16_to_cpu(id->awupf);
nvme_mpath_default_iopolicy(subsys);
subsys->dev.class = &nvme_subsys_class;
@@ -3441,7 +3465,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
-
+ ctrl->awupf = le16_to_cpu(id->awupf);
out_free:
kfree(id);
return ret;
@@ -4292,6 +4316,15 @@ static void nvme_scan_work(struct work_struct *work)
nvme_scan_ns_sequential(ctrl);
}
mutex_unlock(&ctrl->scan_lock);
+
+ /* Requeue if we have missed AENs */
+ if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
+ nvme_queue_scan(ctrl);
+#ifdef CONFIG_NVME_MULTIPATH
+ else if (ctrl->ana_log_buf)
+ /* Re-read the ANA log page to not miss updates */
+ queue_work(nvme_wq, &ctrl->ana_work);
+#endif
}
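The requeue closes a race where an AEN arrives while the scan lock is held; the pattern reduced to its essentials (a sketch, not the full function):

	mutex_lock(&ctrl->scan_lock);
	/* ... scan namespaces; an NS-change AEN landing now only sets
	 * the event bit, it cannot start another scan ... */
	mutex_unlock(&ctrl->scan_lock);

	/* recheck after unlock: a set bit means an AEN raced the scan */
	if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
		nvme_queue_scan(ctrl);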
/*
@@ -4483,7 +4516,8 @@ static void nvme_fw_act_work(struct work_struct *work)
msleep(100);
}
- if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
+ !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_unquiesce_io_queues(ctrl);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2a7635565083..ac17e650327f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -427,7 +427,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
struct nvme_ns *ns;
if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
- return NULL;
+ return false;
list_for_each_entry_srcu(ns, &head->list, siblings,
srcu_read_lock_held(&head->srcu)) {
@@ -638,7 +638,8 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_set_stacking_limits(&lim);
lim.dma_alignment = 3;
- lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
+ lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT |
+ BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES;
if (head->ids.csi == NVME_CSI_ZNS)
lim.features |= BLK_FEAT_ZONED;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7be92d07430e..3804f91b1942 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,6 +410,7 @@ struct nvme_ctrl {
enum nvme_ctrl_type cntrltype;
enum nvme_dctype dctype;
+ u16 awupf; /* 0's based value. */
};
static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
@@ -442,11 +443,11 @@ struct nvme_subsystem {
u8 cmic;
enum nvme_subsys_type subtype;
u16 vendor_id;
- u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
enum nvme_iopolicy iopolicy;
#endif
+ u32 atomic_bs;
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1dc12784efaf..28f560f86e91 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -390,7 +390,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
*/
-static int nvme_pci_npages_prp(void)
+static __always_inline int nvme_pci_npages_prp(void)
{
unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
@@ -1205,7 +1205,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
+ spin_unlock(&nvmeq->cq_poll_lock);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
@@ -3578,7 +3580,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- if (!nvme_try_sched_reset(&dev->ctrl))
+ if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -3626,6 +3628,9 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
+ { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
@@ -3649,6 +3654,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
.driver_data = NVME_QUIRK_BROKEN_MSI },
+ { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */
+ .driver_data = NVME_QUIRK_BROKEN_MSI |
+ NVME_QUIRK_NO_DEEPEST_PS },
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
@@ -3734,6 +3742,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 327f3f2f5399..d991baa82a1c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1944,7 +1944,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
cancel_work_sync(&queue->io_work);
}
-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1963,6 +1963,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_unlock(&queue->queue_lock);
}
+static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ int timeout = 100;
+
+ while (timeout > 0) {
+ if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
+ !sk_wmem_alloc_get(queue->sock->sk))
+ return;
+ msleep(2);
+ timeout -= 2;
+ }
+ dev_warn(nctrl->device,
+ "qid %d: timeout draining sock wmem allocation expired\n",
+ qid);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ nvme_tcp_stop_queue_nowait(nctrl, qid);
+ nvme_tcp_wait_queue(nctrl, qid);
+}
+
static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
{
write_lock_bh(&queue->sock->sk->sk_callback_lock);
@@ -2030,7 +2055,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
int i;
for (i = 1; i < ctrl->queue_count; i++)
- nvme_tcp_stop_queue(ctrl, i);
+ nvme_tcp_stop_queue_nowait(ctrl, i);
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_wait_queue(ctrl, i);
}
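Stopping every queue first and only waiting afterwards lets the per-queue socket drains overlap instead of serializing. The wait itself is a bounded poll; a standalone illustration of the idiom (usleep() stands in for msleep(), and the halving stands in for the socket actually draining):

#include <stdio.h>
#include <unistd.h>

static void wait_drain(int *wmem)
{
	int timeout = 100;		/* ms budget, as in the hunk */

	while (timeout > 0) {
		if (*wmem == 0)
			return;		/* drained */
		usleep(2000);		/* msleep(2) */
		timeout -= 2;
		*wmem /= 2;		/* data leaves the socket meanwhile */
	}
	fprintf(stderr, "timeout draining wmem\n");
}

int main(void)
{
	int wmem = 4096;

	wait_drain(&wmem);
	printf("wmem=%d\n", wmem);	/* 0 */
	return 0;
}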
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index fb7446d6d682..4c253b433bf7 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -98,6 +98,7 @@ config NVME_TARGET_TCP_TLS
bool "NVMe over Fabrics TCP target TLS encryption support"
depends on NVME_TARGET_TCP
select NET_HANDSHAKE
+ select TLS
help
Enables TLS encryption for the NVMe TCP target using the netlink handshake API.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2e741696f371..6ccce0ee5157 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -324,6 +324,9 @@ int nvmet_enable_port(struct nvmet_port *port)
lockdep_assert_held(&nvmet_config_sem);
+ if (port->disc_addr.trtype == NVMF_TRTYPE_MAX)
+ return -EINVAL;
+
ops = nvmet_transports[port->disc_addr.trtype];
if (!ops) {
up_write(&nvmet_config_sem);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 7318b736d414..ef8c5961e10c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1028,33 +1028,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
struct nvmet_fc_hostport *newhost, *match = NULL;
unsigned long flags;
+ /*
+ * Caller holds a reference on tgtport.
+ */
+
/* if LLDD not implemented, leave as NULL */
if (!hosthandle)
return NULL;
- /*
- * take reference for what will be the newly allocated hostport if
- * we end up using a new allocation
- */
- if (!nvmet_fc_tgtport_get(tgtport))
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
spin_unlock_irqrestore(&tgtport->lock, flags);
- if (match) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (match)
return match;
- }
newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
- if (!newhost) {
- /* no new allocation - release reference */
- nvmet_fc_tgtport_put(tgtport);
+ if (!newhost)
return ERR_PTR(-ENOMEM);
- }
spin_lock_irqsave(&tgtport->lock, flags);
match = nvmet_fc_match_hostport(tgtport, hosthandle);
@@ -1063,6 +1054,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
kfree(newhost);
newhost = match;
} else {
+ nvmet_fc_tgtport_get(tgtport);
newhost->tgtport = tgtport;
newhost->hosthandle = hosthandle;
INIT_LIST_HEAD(&newhost->host_list);
@@ -1097,7 +1089,8 @@ static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
nvmet_fc_tgtport_get(assoc->tgtport);
- queue_work(nvmet_wq, &assoc->del_work);
+ if (!queue_work(nvmet_wq, &assoc->del_work))
+ nvmet_fc_tgtport_put(assoc->tgtport);
}
static bool
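The del_work hunk is an instance of a generic rule: a reference taken on behalf of a work item must be dropped if the work turns out to be already pending. Sketch with hypothetical get_ref()/put_ref() helpers:

	get_ref(obj);
	if (!queue_work(wq, &obj->work))	/* false: already queued */
		put_ref(obj);			/* drop the extra reference */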
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 5c4c4c1f535d..17c0e5ee731a 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -596,9 +596,6 @@ static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
struct nvmet_pci_epf_irq_vector *iv = cq->iv;
bool ret;
- if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
- return false;
-
/* IRQ coalescing for the admin queue is not allowed. */
if (!cq->qid)
return true;
@@ -625,7 +622,8 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
struct pci_epf *epf = nvme_epf->epf;
int ret = 0;
- if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) ||
+ !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
return;
mutex_lock(&ctrl->irq_lock);
@@ -656,7 +654,9 @@ static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
}
if (ret)
- dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
+ dev_err_ratelimited(ctrl->dev,
+ "CQ[%u]: Failed to raise IRQ (err=%d)\n",
+ cq->qid, ret);
unlock:
mutex_unlock(&ctrl->irq_lock);
@@ -1264,6 +1264,7 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
+ int ret;
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
@@ -1298,6 +1299,24 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
if (status != NVME_SC_SUCCESS)
goto err;
+ /*
+ * Map the CQ PCI address space. Since PCI endpoint controllers may
+ * return a partial mapping, check that the mapping is large enough.
+ */
+ ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
+ &cq->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
+ cq->qid, ret);
+ goto err_internal;
+ }
+
+ if (cq->pci_map.pci_size < cq->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ cq->qid);
+ goto err_unmap_queue;
+ }
+
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
@@ -1305,6 +1324,10 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
return NVME_SC_SUCCESS;
+err_unmap_queue:
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
+err_internal:
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
err:
if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
@@ -1321,7 +1344,9 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
cancel_delayed_work_sync(&cq->work);
nvmet_pci_epf_drain_queue(cq);
- nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
return NVME_SC_SUCCESS;
}
@@ -1554,36 +1579,6 @@ static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
ctrl->cq = NULL;
}
-static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
- int ret;
-
- ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
- queue->pci_size, &queue->pci_map);
- if (ret) {
- dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
- queue->qid, ret);
- return ret;
- }
-
- if (queue->pci_map.pci_size < queue->pci_size) {
- dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
- queue->qid);
- nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
-}
-
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
{
struct nvmet_pci_epf_iod *iod =
@@ -1749,11 +1744,7 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
struct nvme_completion *cqe;
struct nvmet_pci_epf_iod *iod;
unsigned long flags;
- int ret, n = 0;
-
- ret = nvmet_pci_epf_map_queue(ctrl, cq);
- if (ret)
- goto again;
+ int ret = 0, n = 0;
while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
@@ -1809,8 +1800,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
n++;
}
- nvmet_pci_epf_unmap_queue(ctrl, cq);
-
/*
* We do not support precise IRQ coalescing time (100ns units as per
* NVMe specifications). So if we have posted completion entries without
@@ -1819,7 +1808,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
if (n)
nvmet_pci_epf_raise_irq(ctrl, cq, true);
-again:
if (ret < 0)
queue_delayed_work(system_highpri_wq, &cq->work,
NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
@@ -2109,11 +2097,18 @@ out_mempool_exit:
static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+ dev_info(ctrl->dev, "PCI link up\n");
+ ctrl->link_up = true;
+
schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}
static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+ dev_info(ctrl->dev, "PCI link down\n");
+ ctrl->link_up = false;
+
cancel_delayed_work_sync(&ctrl->poll_cc);
nvmet_pci_epf_disable_ctrl(ctrl, false);
@@ -2340,10 +2335,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
if (ret)
goto out_clear_bar;
- if (!epc_features->linkup_notifier) {
- ctrl->link_up = true;
+ if (!epc_features->linkup_notifier)
nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
- }
return 0;
@@ -2359,7 +2352,6 @@ static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_destroy_ctrl(ctrl);
nvmet_pci_epf_deinit_dma(nvme_epf);
@@ -2371,7 +2363,6 @@ static int nvmet_pci_epf_link_up(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = true;
nvmet_pci_epf_start_ctrl(ctrl);
return 0;
@@ -2382,7 +2373,6 @@ static int nvmet_pci_epf_link_down(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_stop_ctrl(ctrl);
return 0;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4f9cac8a5abe..259ad77c03c5 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1560,6 +1560,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
struct socket *sock = queue->sock;
+ if (!queue->state_change)
+ return;
+
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_data_ready = queue->data_ready;
sock->sk->sk_state_change = queue->state_change;
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 8671b7c974b9..eceb3cdb421f 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -260,6 +260,7 @@ config NVMEM_RCAR_EFUSE
config NVMEM_RMEM
tristate "Reserved Memory Based Driver Support"
depends on HAS_IOMEM
+ select CRC32
help
This driver maps reserved memory into an nvmem device. It might be
useful to expose information left by firmware in memory.
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index fff85bbf0ecd..e206efc29a00 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -594,9 +594,11 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
cell->nbits = info->nbits;
cell->np = info->np;
- if (cell->nbits)
+ if (cell->nbits) {
cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
BITS_PER_BYTE);
+ cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
+ }
if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
dev_err(&nvmem->dev,
@@ -605,6 +607,18 @@ static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
return -EINVAL;
}
+ if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
+ dev_err(&nvmem->dev,
+ "cell %s raw len %zd unaligned to nvmem word size %d\n",
+ cell->name ?: "<unknown>", cell->raw_len,
+ nvmem->word_size);
+
+ if (info->raw_len)
+ return -EINVAL;
+
+ cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
+ }
+
return 0;
}
@@ -837,7 +851,9 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
- if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
+ if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
+ info.nbits < 1 ||
+ info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
of_node_put(child);
return -EINVAL;
@@ -1630,21 +1646,29 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
u8 *p, *b;
- int i, extra, bit_offset = cell->bit_offset;
+ int i, extra, bytes_offset;
+ int bit_offset = cell->bit_offset;
p = b = buf;
- if (bit_offset) {
+
+ bytes_offset = bit_offset / BITS_PER_BYTE;
+ b += bytes_offset;
+ bit_offset %= BITS_PER_BYTE;
+
+ if (bit_offset % BITS_PER_BYTE) {
/* First shift */
- *b++ >>= bit_offset;
+ *p = *b++ >> bit_offset;
/* setup rest of the bytes if any */
for (i = 1; i < cell->bytes; i++) {
/* Get bits from next byte and shift them towards msb */
- *p |= *b << (BITS_PER_BYTE - bit_offset);
+ *p++ |= *b << (BITS_PER_BYTE - bit_offset);
- p = b;
- *b++ >>= bit_offset;
+ *p = *b++ >> bit_offset;
}
+ } else if (p != b) {
+ memmove(p, b, cell->bytes - bytes_offset);
+ p += cell->bytes - 1;
} else {
/* point to the msb */
p += cell->bytes - 1;
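A userspace re-creation of the reworked shift (simplified: the memmove length here ignores the trailing-byte bookkeeping of the kernel function):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

static void shift_in_place(uint8_t *buf, int bit_offset, int cell_bytes)
{
	uint8_t *p = buf, *b = buf;
	int i;

	b += bit_offset / BITS_PER_BYTE;	/* skip whole bytes first */
	bit_offset %= BITS_PER_BYTE;

	if (bit_offset) {			/* residual bit shift */
		*p = *b++ >> bit_offset;
		for (i = 1; i < cell_bytes; i++) {
			*p++ |= *b << (BITS_PER_BYTE - bit_offset);
			*p = *b++ >> bit_offset;
		}
	} else if (p != b) {			/* pure byte offset */
		memmove(p, b, cell_bytes);
	}
}

int main(void)
{
	uint8_t raw[4] = { 0x00, 0x34, 0x12, 0x00 };	/* 0x1234 at bit 8 */

	shift_in_place(raw, 8, 2);
	printf("%02x%02x\n", raw[1], raw[0]);		/* prints 1234 */
	return 0;
}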
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 116a39e804c7..a872c640b8c5 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -321,19 +321,32 @@ static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct qfprom_priv *priv = context;
- u8 *val = _val;
- int i = 0, words = bytes;
+ u32 *val = _val;
void __iomem *base = priv->qfpcorrected;
+ int words = DIV_ROUND_UP(bytes, sizeof(u32));
+ int i;
if (read_raw_data && priv->qfpraw)
base = priv->qfpraw;
- while (words--)
- *val++ = readb(base + reg + i++);
+ for (i = 0; i < words; i++)
+ *val++ = readl(base + reg + i * sizeof(u32));
return 0;
}
+/* Align reads to word boundary */
+static void qfprom_fixup_dt_cell_info(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell)
+{
+ unsigned int byte_offset = cell->offset % sizeof(u32);
+
+ cell->bit_offset += byte_offset * BITS_PER_BYTE;
+ cell->offset -= byte_offset;
+ if (byte_offset && !cell->nbits)
+ cell->nbits = cell->bytes * BITS_PER_BYTE;
+}
+
static void qfprom_runtime_disable(void *data)
{
pm_runtime_disable(data);
@@ -358,10 +371,11 @@ static int qfprom_probe(struct platform_device *pdev)
struct nvmem_config econfig = {
.name = "qfprom",
.add_legacy_fixed_of_cells = true,
- .stride = 1,
- .word_size = 1,
+ .stride = 4,
+ .word_size = 4,
.id = NVMEM_DEVID_AUTO,
.reg_read = qfprom_reg_read,
+ .fixup_dt_cell_info = qfprom_fixup_dt_cell_info,
};
struct device *dev = &pdev->dev;
struct resource *res;
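A standalone model of qfprom_fixup_dt_cell_info(): a cell starting at a byte offset inside a 32-bit word is re-expressed as a word-aligned cell with a larger bit offset, so the new word-sized reg_read still covers it (struct and values invented for illustration):

#include <stdio.h>

#define BITS_PER_BYTE 8

struct cell { unsigned int offset, bytes, bit_offset, nbits; };

static void fixup(struct cell *c)
{
	unsigned int byte_offset = c->offset % 4;	/* sizeof(u32) */

	c->bit_offset += byte_offset * BITS_PER_BYTE;
	c->offset -= byte_offset;
	if (byte_offset && !c->nbits)
		c->nbits = c->bytes * BITS_PER_BYTE;
}

int main(void)
{
	struct cell c = { .offset = 0x25, .bytes = 2 };

	fixup(&c);
	printf("offset=%#x bit_offset=%u nbits=%u\n",
	       c.offset, c.bit_offset, c.nbits);	/* 0x24, 8, 16 */
	return 0;
}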
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
index ebc3f0b24166..d88f12c53242 100644
--- a/drivers/nvmem/rockchip-otp.c
+++ b/drivers/nvmem/rockchip-otp.c
@@ -59,7 +59,6 @@
#define RK3588_OTPC_AUTO_EN 0x08
#define RK3588_OTPC_INT_ST 0x84
#define RK3588_OTPC_DOUT0 0x20
-#define RK3588_NO_SECURE_OFFSET 0x300
#define RK3588_NBYTES 4
#define RK3588_BURST_NUM 1
#define RK3588_BURST_SHIFT 8
@@ -69,6 +68,7 @@
struct rockchip_data {
int size;
+ int read_offset;
const char * const *clks;
int num_clks;
nvmem_reg_read_t reg_read;
@@ -196,7 +196,7 @@ static int rk3588_otp_read(void *context, unsigned int offset,
addr_start = round_down(offset, RK3588_NBYTES) / RK3588_NBYTES;
addr_end = round_up(offset + bytes, RK3588_NBYTES) / RK3588_NBYTES;
addr_len = addr_end - addr_start;
- addr_start += RK3588_NO_SECURE_OFFSET;
+ addr_start += otp->data->read_offset / RK3588_NBYTES;
buf = kzalloc(array_size(addr_len, RK3588_NBYTES), GFP_KERNEL);
if (!buf)
@@ -274,12 +274,21 @@ static const struct rockchip_data px30_data = {
.reg_read = px30_otp_read,
};
+static const struct rockchip_data rk3576_data = {
+ .size = 0x100,
+ .read_offset = 0x700,
+ .clks = px30_otp_clocks,
+ .num_clks = ARRAY_SIZE(px30_otp_clocks),
+ .reg_read = rk3588_otp_read,
+};
+
static const char * const rk3588_otp_clocks[] = {
"otp", "apb_pclk", "phy", "arb",
};
static const struct rockchip_data rk3588_data = {
.size = 0x400,
+ .read_offset = 0xc00,
.clks = rk3588_otp_clocks,
.num_clks = ARRAY_SIZE(rk3588_otp_clocks),
.reg_read = rk3588_otp_read,
@@ -295,6 +304,10 @@ static const struct of_device_id rockchip_otp_match[] = {
.data = &px30_data,
},
{
+ .compatible = "rockchip,rk3576-otp",
+ .data = &rk3576_data,
+ },
+ {
.compatible = "rockchip,rk3588-otp",
.data = &rk3588_data,
},
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 779db058c42f..2caad365a665 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -249,25 +249,22 @@ static int adjust_local_phandle_references(const struct device_node *local_fixup
*/
int of_resolve_phandles(struct device_node *overlay)
{
- struct device_node *child, *local_fixups, *refnode;
- struct device_node *tree_symbols, *overlay_fixups;
+ struct device_node *child, *refnode;
+ struct device_node *overlay_fixups;
+ struct device_node __free(device_node) *local_fixups = NULL;
struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;
- tree_symbols = NULL;
-
if (!overlay) {
pr_err("null overlay\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
phandle_delta = live_tree_max_phandle() + 1;
@@ -279,7 +276,7 @@ int of_resolve_phandles(struct device_node *overlay)
err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
if (err)
- goto out;
+ return err;
overlay_fixups = NULL;
@@ -288,16 +285,13 @@ int of_resolve_phandles(struct device_node *overlay)
overlay_fixups = child;
}
- if (!overlay_fixups) {
- err = 0;
- goto out;
- }
+ if (!overlay_fixups)
+ return 0;
- tree_symbols = of_find_node_by_path("/__symbols__");
+ struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
if (!tree_symbols) {
pr_err("no symbols in root of device tree.\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for_each_property_of_node(overlay_fixups, prop) {
@@ -311,14 +305,12 @@ int of_resolve_phandles(struct device_node *overlay)
if (err) {
pr_err("node label '%s' not found in live devicetree symbols table\n",
prop->name);
- goto out;
+ return err;
}
refnode = of_find_node_by_path(refpath);
- if (!refnode) {
- err = -ENOENT;
- goto out;
- }
+ if (!refnode)
+ return -ENOENT;
phandle = refnode->phandle;
of_node_put(refnode);
@@ -328,11 +320,8 @@ int of_resolve_phandles(struct device_node *overlay)
break;
}
-out:
if (err)
pr_err("overlay phandle fixup failed: %d\n", err);
- of_node_put(tree_symbols);
-
return err;
}
EXPORT_SYMBOL_GPL(of_resolve_phandles);
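The conversion leans on scope-based cleanup; the pattern in isolation (a sketch, not the resolver code):

	struct device_node __free(device_node) *np =
		of_find_node_by_path("/__symbols__");

	if (!np)
		return -EINVAL;
	/* use np; the cleanup attribute calls of_node_put() automatically
	 * on every return path, so no goto-based unwinding is needed */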
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..5c3054aaec8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -203,6 +203,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system the entropy goes down
+ from 16 bits to 15 bits. The actual reduction depends on the
+ physical address bits, processor features, the kernel config
+ (5-level page tables) and the physical memory present.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index c6b266c772c8..ec6c8dbdc5e9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -538,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
}
EXPORT_SYMBOL_GPL(pci_max_pasids);
+
+/**
+ * pci_pasid_status - Check the PASID status
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise the value of the control register is returned.
+ * Status reported are:
+ *
+ * PCI_PASID_CTRL_ENABLE - PASID enabled
+ * PCI_PASID_CTRL_EXEC - Execute permission enabled
+ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
+ */
+int pci_pasid_status(struct pci_dev *pdev)
+{
+ int pasid;
+ u16 ctrl;
+
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
+
+ ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
+ PCI_PASID_CTRL_PRIV;
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_status);
#endif /* CONFIG_PCI_PASID */
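A hypothetical caller, showing the intended decoding of the return value:

	int status = pci_pasid_status(pdev);

	if (status < 0)
		return;			/* no PASID capability */
	if (status & PCI_PASID_CTRL_ENABLE)
		dev_info(&pdev->dev, "PASID on%s%s\n",
			 (status & PCI_PASID_CTRL_EXEC) ? " +exec" : "",
			 (status & PCI_PASID_CTRL_PRIV) ? " +priv" : "");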
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index e41479a9ca02..c91d09502468 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -282,7 +282,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index ffaded8f2df7..ae3fd2a5dbf8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -908,7 +908,7 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
if (ret)
return ret;
- mem = ioremap(atu.cpu_addr, pci->region_align);
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
if (!mem)
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 1a3bdc01b074..bae226c779a5 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -309,8 +309,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+ /* Covers 64KB to 64GB, (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
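The encoding in standalone form (userspace sketch; __builtin_clzll() replaces the kernel's ilog2(), and 64 GB is only accepted after this fix):

#include <stdio.h>
#include <stdint.h>

static int encode_ibar_size(uint64_t size)
{
	int log2_in = 63 - __builtin_clzll(size);

	if (log2_in >= 12 && log2_in <= 15)		/* 4 KB .. 32 KB */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 36)	/* 64 KB .. 64 GB */
		return log2_in - 15;
	return 0;					/* invalid: disable */
}

int main(void)
{
	printf("32G -> 0x%x\n", encode_ibar_size(32ULL << 30));	/* 0x14 */
	printf("64G -> 0x%x\n", encode_ibar_size(64ULL << 30));	/* 0x15 */
	return 0;
}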
@@ -1947,3 +1947,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index dc8ecdbee56c..163d805673d6 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -538,7 +538,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 94ceec50a2b9..8df064b62a2f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case as direct
+ * write by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 54286a40bdfb..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
- .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .deviceid = 0x0116,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 2409787cf56d..bce3ae2c0f65 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -738,6 +738,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -929,6 +930,7 @@ static void pci_epf_test_free_space(struct pci_epf *epf)
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 055518ee354d..e9e9aaa91770 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -59,7 +59,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
- pci_dev_put(pdev);
rc = -EBUSY;
goto out;
}
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..8b8848788618 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -609,12 +608,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -659,9 +662,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
@@ -744,15 +744,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
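The test recurring through these hunks swaps the removed global flag for a per-device property of the MSI parent domain; the query in isolation (sketch):

	/* True when the device's MSI parent domain (e.g. Xen's) declares
	 * MSI_FLAG_NO_MASK, i.e. MSI/MSI-X mask bits must not be touched. */
	if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
		return;		/* skip all mask-bit manipulation */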
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8707c5b08cf3..477eb07bfbca 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -814,11 +814,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = size + size1;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
static resource_size_t calculate_memsize(resource_size_t size,
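Worked example of the reordering (standalone C; macros and numbers invented for illustration). Previously old_size was folded in before children_add_size, so an already-large window grew again by the child size; now old_size only caps the final value:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long size = 0x100, old_size = 0x2000;
	unsigned long add_size = 0, children = 0x100, align = 0x1000;
	unsigned long before, after;

	/* before: old_size applied first, children added on top */
	before = ALIGN(MAX(MAX(size, old_size), add_size) + children, align);
	/* after: sum first, old_size only caps the result */
	after = ALIGN(MAX(MAX(size, add_size) + children, old_size), align);

	printf("before=%#lx after=%#lx\n", before, after);
	/* prints before=0x3000 after=0x2000 */
	return 0;
}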
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index ef959e66db7c..0ba0f545b118 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -727,8 +727,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2) ||
- (chan == 7 && cmn->snp_vc_num < 2) ||
- (chan == 8 && cmn->req_vc_num < 2))
+ (chan == 7 && cmn->req_vc_num < 2) ||
+ (chan == 8 && cmn->snp_vc_num < 2))
return 0;
}
@@ -884,8 +884,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
- _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
- _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
+ _CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)), \
+ _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5))
#define CMN_EVENT_XP_DAT(_name, _event) \
_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \
@@ -2557,6 +2557,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
cmn->dev = &pdev->dev;
cmn->part = (unsigned long)device_get_match_data(cmn->dev);
+ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
platform_set_drvdata(pdev, cmn);
if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
@@ -2584,7 +2585,6 @@ static int arm_cmn_probe(struct platform_device *pdev)
if (err)
return err;
- cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
.parent = cmn->dev,
@@ -2650,6 +2650,7 @@ static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", PART_CMN600 },
{ "ARMHC650" },
{ "ARMHC700" },
+ { "ARMHC003" },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0e360feb3432..9ebc950559c0 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -825,10 +825,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-
- kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8dfdce605a90..067316dfcd83 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -405,13 +405,14 @@ EXPORT_SYMBOL_GPL(phy_power_off);
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
{
- int ret;
+ int ret = 0;
- if (!phy || !phy->ops->set_mode)
+ if (!phy)
return 0;
mutex_lock(&phy->mutex);
- ret = phy->ops->set_mode(phy, mode, submode);
+ if (phy->ops->set_mode)
+ ret = phy->ops->set_mode(phy, mode, submode);
if (!ret)
phy->attrs.mode = mode;
mutex_unlock(&phy->mutex);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 775f4f973a6c..9fdf17e0848a 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -9,6 +9,7 @@
* Copyright (C) 2014 Cogent Embedded, Inc.
*/
+#include <linux/cleanup.h>
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -107,7 +108,6 @@ struct rcar_gen3_phy {
struct rcar_gen3_chan *ch;
u32 int_enable_bits;
bool initialized;
- bool otg_initialized;
bool powered;
};
@@ -119,9 +119,8 @@ struct rcar_gen3_chan {
struct regulator *vbus;
struct reset_control *rstc;
struct work_struct work;
- struct mutex lock; /* protects rphys[...].powered */
+ spinlock_t lock; /* protects access to hardware and driver data structure. */
enum usb_dr_mode dr_mode;
- int irq;
u32 obint_enable_bits;
bool extcon_host;
bool is_otg_channel;
@@ -320,16 +319,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch)
return false;
}
-static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch)
+static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch)
{
- int i;
-
- for (i = 0; i < NUM_OF_PHYS; i++) {
- if (ch->rphys[i].otg_initialized)
- return false;
+ for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI;
+ i++) {
+ if (ch->rphys[i].initialized)
+ return true;
}
- return true;
+ return false;
}
static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch)
@@ -351,7 +349,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
+ guard(spinlock_irqsave)(&ch->lock);
+
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;
if (sysfs_streq(buf, "host"))
@@ -389,7 +389,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
+ if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch))
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -402,6 +402,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
void __iomem *usb2_base = ch->base;
u32 val;
+ if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch))
+ return;
+
/* Should not use functions of read-modify-write a register */
val = readl(usb2_base + USB2_LINECTRL1);
val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN |
@@ -415,7 +418,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
}
- msleep(20);
+ mdelay(20);
writel(0xffffffff, usb2_base + USB2_OBINTSTA);
writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
@@ -427,16 +430,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
void __iomem *usb2_base = ch->base;
- u32 status = readl(usb2_base + USB2_OBINTSTA);
+ struct device *dev = ch->dev;
irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_suspended(dev))
+ goto rpm_put;
- if (status & ch->obint_enable_bits) {
- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
- rcar_gen3_device_recognition(ch);
- ret = IRQ_HANDLED;
+ scoped_guard(spinlock, &ch->lock) {
+ status = readl(usb2_base + USB2_OBINTSTA);
+ if (status & ch->obint_enable_bits) {
+ dev_vdbg(dev, "%s: %08x\n", __func__, status);
+ writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
+ rcar_gen3_device_recognition(ch);
+ ret = IRQ_HANDLED;
+ }
}
+rpm_put:
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -446,32 +460,23 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
struct rcar_gen3_chan *channel = rphy->ch;
void __iomem *usb2_base = channel->base;
u32 val;
- int ret;
- if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
- ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
- IRQF_SHARED, dev_name(channel->dev), channel);
- if (ret < 0) {
- dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
- return ret;
- }
- }
+ guard(spinlock_irqsave)(&channel->lock);
/* Initialize USB2 part */
val = readl(usb2_base + USB2_INT_ENABLE);
val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits;
writel(val, usb2_base + USB2_INT_ENABLE);
- writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
- writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
-
- /* Initialize otg part */
- if (channel->is_otg_channel) {
- if (rcar_gen3_needs_init_otg(channel))
- rcar_gen3_init_otg(channel);
- rphy->otg_initialized = true;
+
+ if (!rcar_gen3_is_any_rphy_initialized(channel)) {
+ writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
+ writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
}
+ /* Initialize otg part (only if we initialize a PHY with IRQs). */
+ if (rphy->int_enable_bits)
+ rcar_gen3_init_otg(channel);
+
rphy->initialized = true;
return 0;
@@ -484,10 +489,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
void __iomem *usb2_base = channel->base;
u32 val;
- rphy->initialized = false;
+ guard(spinlock_irqsave)(&channel->lock);
- if (channel->is_otg_channel)
- rphy->otg_initialized = false;
+ rphy->initialized = false;
val = readl(usb2_base + USB2_INT_ENABLE);
val &= ~rphy->int_enable_bits;
@@ -495,9 +499,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
val &= ~USB2_INT_ENABLE_UCOM_INTEN;
writel(val, usb2_base + USB2_INT_ENABLE);
- if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
- free_irq(channel->irq, channel);
-
return 0;
}
@@ -509,16 +510,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
u32 val;
int ret = 0;
- mutex_lock(&channel->lock);
- if (!rcar_gen3_are_all_rphys_power_off(channel))
- goto out;
-
if (channel->vbus) {
ret = regulator_enable(channel->vbus);
if (ret)
- goto out;
+ return ret;
}
+ guard(spinlock_irqsave)(&channel->lock);
+
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+ goto out;
+
val = readl(usb2_base + USB2_USBCTR);
val |= USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
@@ -528,7 +530,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
out:
/* The powered flag should be set for any other phys anyway */
rphy->powered = true;
- mutex_unlock(&channel->lock);
return 0;
}
@@ -539,18 +540,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
struct rcar_gen3_chan *channel = rphy->ch;
int ret = 0;
- mutex_lock(&channel->lock);
- rphy->powered = false;
+ scoped_guard(spinlock_irqsave, &channel->lock) {
+ rphy->powered = false;
- if (!rcar_gen3_are_all_rphys_power_off(channel))
- goto out;
+ if (rcar_gen3_are_all_rphys_power_off(channel)) {
+ u32 val = readl(channel->base + USB2_USBCTR);
+
+ val |= USB2_USBCTR_PLL_RST;
+ writel(val, channel->base + USB2_USBCTR);
+ }
+ }
if (channel->vbus)
ret = regulator_disable(channel->vbus);
-out:
- mutex_unlock(&channel->lock);
-
return ret;
}
@@ -703,7 +706,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
- int ret = 0, i;
+ int ret = 0, i, irq;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -719,8 +722,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
channel->obint_enable_bits = USB2_OBINT_BITS;
- /* get irq number here and request_irq for OTG in phy_init */
- channel->irq = platform_get_irq_optional(pdev, 0);
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
channel->is_otg_channel = true;
@@ -763,7 +764,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (phy_data->no_adp_ctrl)
channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
- mutex_init(&channel->lock);
+ spin_lock_init(&channel->lock);
for (i = 0; i < NUM_OF_PHYS; i++) {
channel->rphys[i].phy = devm_phy_create(dev, NULL,
phy_data->phy_usb2_ops);
@@ -789,6 +790,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
channel->vbus = NULL;
}
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO) {
+ ret = irq;
+ goto error;
+ } else if (irq > 0) {
+ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+ ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
+ IRQF_SHARED, dev_name(dev), channel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq (%d)\n", irq);
+ goto error;
+ }
+ }
+
provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 2fb4f297fda3..28a4235bfd5f 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -94,8 +94,8 @@
#define LCPLL_ALONE_MODE BIT(1)
/* CMN_REG(0097) */
#define DIG_CLK_SEL BIT(1)
-#define ROPLL_REF BIT(1)
-#define LCPLL_REF 0
+#define LCPLL_REF BIT(1)
+#define ROPLL_REF 0
/* CMN_REG(0099) */
#define CMN_ROPLL_ALONE_MODE BIT(2)
#define ROPLL_ALONE_MODE BIT(2)
@@ -325,6 +325,8 @@ static const struct ropll_config ropll_tmds_cfg[] = {
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
{ 650000, 162, 162, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 54, 0, 16, 4, 1,
1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
+ { 502500, 84, 84, 1, 1, 7, 1, 1, 1, 1, 1, 1, 1, 11, 1, 4, 5,
+ 4, 11, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
{ 337500, 0x70, 0x70, 1, 1, 0xf, 1, 1, 1, 1, 1, 1, 1, 0x2, 0, 0x01, 5,
1, 1, 1, 0, 0x20, 0x0c, 1, 0x0e, 0, 0, },
{ 400000, 100, 100, 1, 1, 11, 1, 1, 0, 1, 0, 1, 1, 0x9, 0, 0x05, 0,
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
index 5b1e8a3806ed..fff04e0fbd80 100644
--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -187,6 +187,8 @@ struct rk_udphy {
u32 dp_aux_din_sel;
bool dp_sink_hpd_sel;
bool dp_sink_hpd_cfg;
+ unsigned int link_rate;
+ unsigned int lanes;
u8 bw;
int id;
@@ -1045,7 +1047,6 @@ static int rk_udphy_dp_phy_init(struct phy *phy)
mutex_lock(&udphy->mutex);
udphy->dp_in_use = true;
- rk_udphy_dp_hpd_event_trigger(udphy, udphy->dp_sink_hpd_cfg);
mutex_unlock(&udphy->mutex);
@@ -1103,15 +1104,19 @@ static int rk_udphy_dp_phy_power_off(struct phy *phy)
return 0;
}
-static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
+/*
+ * Verify the requested link rate and cache it on success.
+ */
+static int rk_udphy_dp_phy_verify_link_rate(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
{
- switch (link_rate) {
+ switch (dp->link_rate) {
case 1620:
case 2700:
case 5400:
case 8100:
+ udphy->link_rate = dp->link_rate;
break;
-
default:
return -EINVAL;
}
@@ -1119,45 +1124,44 @@ static int rk_udphy_dp_phy_verify_link_rate(unsigned int link_rate)
return 0;
}
-static int rk_udphy_dp_phy_verify_config(struct rk_udphy *udphy,
- struct phy_configure_opts_dp *dp)
+static int rk_udphy_dp_phy_verify_lanes(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
{
- int i, ret;
-
- /* If changing link rate was required, verify it's supported. */
- ret = rk_udphy_dp_phy_verify_link_rate(dp->link_rate);
- if (ret)
- return ret;
-
- /* Verify lane count. */
switch (dp->lanes) {
case 1:
case 2:
case 4:
/* valid lane count. */
+ udphy->lanes = dp->lanes;
break;
default:
return -EINVAL;
}
- /*
- * If changing voltages is required, check swing and pre-emphasis
- * levels, per-lane.
- */
- if (dp->set_voltages) {
- /* Lane count verified previously. */
- for (i = 0; i < dp->lanes; i++) {
- if (dp->voltage[i] > 3 || dp->pre[i] > 3)
- return -EINVAL;
+ return 0;
+}
- /*
- * Sum of voltage swing and pre-emphasis levels cannot
- * exceed 3.
- */
- if (dp->voltage[i] + dp->pre[i] > 3)
- return -EINVAL;
- }
+/*
+ * If changing voltages is required, check swing and pre-emphasis
+ * levels, per-lane.
+ */
+static int rk_udphy_dp_phy_verify_voltages(struct rk_udphy *udphy,
+ struct phy_configure_opts_dp *dp)
+{
+ int i;
+
+ /* Lane count verified previously. */
+ for (i = 0; i < udphy->lanes; i++) {
+ if (dp->voltage[i] > 3 || dp->pre[i] > 3)
+ return -EINVAL;
+
+ /*
+ * Sum of voltage swing and pre-emphasis levels cannot
+ * exceed 3.
+ */
+ if (dp->voltage[i] + dp->pre[i] > 3)
+ return -EINVAL;
}
return 0;
@@ -1197,9 +1201,23 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
u32 i, val, lane;
int ret;
- ret = rk_udphy_dp_phy_verify_config(udphy, dp);
- if (ret)
- return ret;
+ if (dp->set_rate) {
+ ret = rk_udphy_dp_phy_verify_link_rate(udphy, dp);
+ if (ret)
+ return ret;
+ }
+
+ if (dp->set_lanes) {
+ ret = rk_udphy_dp_phy_verify_lanes(udphy, dp);
+ if (ret)
+ return ret;
+ }
+
+ if (dp->set_voltages) {
+ ret = rk_udphy_dp_phy_verify_voltages(udphy, dp);
+ if (ret)
+ return ret;
+ }
if (dp->set_rate) {
regmap_update_bits(udphy->pma_regmap, CMN_DP_RSTN_OFFSET,
@@ -1244,9 +1262,9 @@ static int rk_udphy_dp_phy_configure(struct phy *phy,
}
if (dp->set_voltages) {
- for (i = 0; i < dp->lanes; i++) {
+ for (i = 0; i < udphy->lanes; i++) {
lane = udphy->dp_lane_sel[i];
- switch (dp->link_rate) {
+ switch (udphy->link_rate) {
case 1620:
case 2700:
regmap_update_bits(udphy->pma_regmap,
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index 46b8f6987c62..28d02ae60cc1 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -1513,8 +1513,11 @@ static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = {
PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00),
PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36),
PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06),
- PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00),
- PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36),
+ PHY_TUNING_ENTRY_PMA(0x19e0, -1, 0x00),
+ PHY_TUNING_ENTRY_PMA(0x19e4, -1, 0x36),
+ /* fix bootloader bug */
+ PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x02),
+ PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x0b),
/* improve LVCC */
PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30),
PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30),
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
index cb5454fbe2c8..b505d89860b4 100644
--- a/drivers/phy/starfive/phy-jh7110-usb.c
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -18,6 +18,8 @@
#include <linux/usb/of.h>
#define USB_125M_CLK_RATE 125000000
+#define USB_CLK_MODE_OFF 0x0
+#define USB_CLK_MODE_RX_NORMAL_PWR BIT(1)
#define USB_LS_KEEPALIVE_OFF 0x4
#define USB_LS_KEEPALIVE_ENABLE BIT(4)
@@ -78,6 +80,7 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
{
struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
int ret;
+ unsigned int val;
ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE);
if (ret)
@@ -87,6 +90,10 @@ static int jh7110_usb2_phy_init(struct phy *_phy)
if (ret)
return ret;
+ val = readl(phy->regs + USB_CLK_MODE_OFF);
+ val |= USB_CLK_MODE_RX_NORMAL_PWR;
+ writel(val, phy->regs + USB_CLK_MODE_OFF);
+
return 0;
}
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index fae6242aa730..23a23f2d64e5 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -237,6 +237,8 @@
#define DATA0_VAL_PD BIT(1)
#define USE_XUSB_AO BIT(4)
+#define TEGRA_UTMI_PAD_MAX 4
+
#define TEGRA186_LANE(_name, _offset, _shift, _mask, _type) \
{ \
.name = _name, \
@@ -269,7 +271,7 @@ struct tegra186_xusb_padctl {
/* UTMI bias and tracking */
struct clk *usb2_trk_clk;
- unsigned int bias_pad_enable;
+ DECLARE_BITMAP(utmi_pad_enabled, TEGRA_UTMI_PAD_MAX);
/* padctl context */
struct tegra186_xusb_padctl_context context;
@@ -603,12 +605,8 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
u32 value;
int err;
- mutex_lock(&padctl->lock);
-
- if (priv->bias_pad_enable++ > 0) {
- mutex_unlock(&padctl->lock);
+ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
- }
err = clk_prepare_enable(priv->usb2_trk_clk);
if (err < 0)
@@ -658,8 +656,6 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
} else {
clk_disable_unprepare(priv->usb2_trk_clk);
}
-
- mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
@@ -667,17 +663,8 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
u32 value;
- mutex_lock(&padctl->lock);
-
- if (WARN_ON(priv->bias_pad_enable == 0)) {
- mutex_unlock(&padctl->lock);
- return;
- }
-
- if (--priv->bias_pad_enable > 0) {
- mutex_unlock(&padctl->lock);
+ if (!bitmap_empty(priv->utmi_pad_enabled, TEGRA_UTMI_PAD_MAX))
return;
- }
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
value |= USB2_PD_TRK;
@@ -690,13 +677,13 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
clk_disable_unprepare(priv->usb2_trk_clk);
}
- mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
struct tegra_xusb_usb2_port *port;
struct device *dev = padctl->dev;
unsigned int index = lane->index;
@@ -705,9 +692,16 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
if (!phy)
return;
+ mutex_lock(&padctl->lock);
+ if (test_bit(index, priv->utmi_pad_enabled)) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
dev_err(dev, "no port found for USB2 lane %u\n", index);
+ mutex_unlock(&padctl->lock);
return;
}
@@ -724,18 +718,28 @@ static void tegra186_utmi_pad_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
value &= ~USB2_OTG_PD_DR;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ set_bit(index, priv->utmi_pad_enabled);
+ mutex_unlock(&padctl->lock);
}
static void tegra186_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra186_xusb_padctl *priv = to_tegra186_xusb_padctl(padctl);
unsigned int index = lane->index;
u32 value;
if (!phy)
return;
+ mutex_lock(&padctl->lock);
+ if (!test_bit(index, priv->utmi_pad_enabled)) {
+ mutex_unlock(&padctl->lock);
+ return;
+ }
+
dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -748,7 +752,11 @@ static void tegra186_utmi_pad_power_down(struct phy *phy)
udelay(2);
+ clear_bit(index, priv->utmi_pad_enabled);
+
tegra186_utmi_bias_pad_power_off(padctl);
+
+ mutex_unlock(&padctl->lock);
}
static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
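The tegra186 hunks above replace the bias_pad_enable counter with a per-pad bitmap: the shared bias pad is powered up on the bitmap's empty-to-non-empty transition and powered down when it empties again, and a repeated power_on or power_down for the same pad becomes a no-op instead of unbalancing a count. A compact userspace sketch of that bookkeeping (names hypothetical):

#include <stdint.h>

struct padctl {
	uint32_t enabled;	/* bit n set => UTMI pad n powered */
};

static void bias_pad_on(void)  { /* enable shared bias pad */ }
static void bias_pad_off(void) { /* disable shared bias pad */ }

static void pad_power_on(struct padctl *p, unsigned int pad)
{
	if (p->enabled & (1u << pad))
		return;			/* already on: idempotent */
	if (!p->enabled)
		bias_pad_on();		/* first user powers the bias pad */
	p->enabled |= 1u << pad;
}

static void pad_power_down(struct padctl *p, unsigned int pad)
{
	if (!(p->enabled & (1u << pad)))
		return;			/* already off: idempotent */
	p->enabled &= ~(1u << pad);
	if (!p->enabled)
		bias_pad_off();		/* last user powers it down */
}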
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 79d4814d758d..c89df95aa6ca 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -548,16 +548,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port,
err = dev_set_name(&port->dev, "%s-%u", name, index);
if (err < 0)
- goto unregister;
+ goto put_device;
err = device_add(&port->dev);
if (err < 0)
- goto unregister;
+ goto put_device;
return 0;
-unregister:
- device_unregister(&port->dev);
+put_device:
+ put_device(&port->dev);
return err;
}
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index cf6efa9c0364..a039b490cdb8 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -72,7 +72,7 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
struct bcm281xx_pin_function {
const char *name;
const char * const *groups;
- const unsigned ngroups;
+ const unsigned int ngroups;
};
/*
@@ -84,10 +84,10 @@ struct bcm281xx_pinctrl_data {
/* List of all pins */
const struct pinctrl_pin_desc *pins;
- const unsigned npins;
+ const unsigned int npins;
const struct bcm281xx_pin_function *functions;
- const unsigned nfunctions;
+ const unsigned int nfunctions;
struct regmap *regmap;
};
@@ -941,7 +941,7 @@ static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = {
};
static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev,
- unsigned pin)
+ unsigned int pin)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -985,7 +985,7 @@ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
}
static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
- unsigned group)
+ unsigned int group)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -993,9 +993,9 @@ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
}
static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned group,
+ unsigned int group,
const unsigned **pins,
- unsigned *num_pins)
+ unsigned int *num_pins)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1007,7 +1007,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset)
+ unsigned int offset)
{
seq_printf(s, " %s", dev_name(pctldev->dev));
}
@@ -1029,7 +1029,7 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
}
static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
- unsigned function)
+ unsigned int function)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1037,9 +1037,9 @@ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
}
static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
- unsigned function,
+ unsigned int function,
const char * const **groups,
- unsigned * const num_groups)
+ unsigned int * const num_groups)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
@@ -1050,8 +1050,8 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
}
static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
+ unsigned int function,
+ unsigned int group)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
const struct bcm281xx_pin_function *f = &pdata->functions[function];
@@ -1082,7 +1082,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
};
static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config)
{
return -ENOTSUPP;
@@ -1091,9 +1091,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */
static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1207,9 +1207,9 @@ static const u16 bcm281xx_pullup_map[] = {
/* Goes through the configs and update register val/mask */
static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1277,9 +1277,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
/* Goes through the configs and update register val/mask */
static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs,
+ unsigned int num_configs,
u32 *val,
u32 *mask)
{
@@ -1321,9 +1321,9 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
}
static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs)
+ unsigned int num_configs)
{
struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
enum bcm281xx_pin_type pin_type;
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 6a94ecd6a8de..0b7f74beb6a6 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p,
pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
- /* Do not defer probing of hogs (circular loop) */
+ /*
+ * Do not defer probing of hogs (circular loop)
+ *
+ * Return 1 to let the caller catch the case.
+ */
if (np_pctldev == p->dev->of_node) {
of_node_put(np_pctldev);
- return -ENODEV;
+ return 1;
}
}
of_node_put(np_pctldev);
@@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
ret = dt_to_map_one_config(p, pctldev, statename,
np_config);
of_node_put(np_config);
+ if (ret == 1)
+ continue;
if (ret < 0)
goto err;
}
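The devicetree.c change above introduces a tri-state return: negative values stay hard errors, 0 is success, and 1 now means "hog, skip this entry", so the caller no longer treats the circular-dependency case as -ENODEV. A sketch of that convention in a hypothetical caller:

static int map_one(int is_hog)
{
	if (is_hog)
		return 1;	/* not an error: caller should skip */
	/* ... build the map for this config node ... */
	return 0;
}

static int map_all(const int *is_hog, int n)
{
	for (int i = 0; i < n; i++) {
		int ret = map_one(is_hog[i]);

		if (ret == 1)
			continue;	/* deliberately skipped */
		if (ret < 0)
			return ret;	/* real failure: unwind */
	}
	return 0;
}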
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 842a1e6cbfc4..18de31328540 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -37,16 +37,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name(
struct pinctrl_dev *pctldev,
const char *name)
{
- const struct group_desc *grp = NULL;
+ const struct group_desc *grp;
int i;
for (i = 0; i < pctldev->num_groups; i++) {
grp = pinctrl_generic_get_group(pctldev, i);
if (grp && !strcmp(grp->grp.name, name))
- break;
+ return grp;
}
- return grp;
+ return NULL;
}
static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 547a798b71c8..5d84a778683d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -6,6 +6,7 @@
*/
#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
@@ -112,39 +113,19 @@
#define REG_LAN_LED1_MAPPING 0x0280
#define LAN4_LED_MAPPING_MASK GENMASK(18, 16)
-#define LAN4_PHY4_LED_MAP BIT(18)
-#define LAN4_PHY2_LED_MAP BIT(17)
-#define LAN4_PHY1_LED_MAP BIT(16)
-#define LAN4_PHY0_LED_MAP 0
-#define LAN4_PHY3_LED_MAP GENMASK(17, 16)
+#define LAN4_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n))
#define LAN3_LED_MAPPING_MASK GENMASK(14, 12)
-#define LAN3_PHY4_LED_MAP BIT(14)
-#define LAN3_PHY2_LED_MAP BIT(13)
-#define LAN3_PHY1_LED_MAP BIT(12)
-#define LAN3_PHY0_LED_MAP 0
-#define LAN3_PHY3_LED_MAP GENMASK(13, 12)
+#define LAN3_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n))
#define LAN2_LED_MAPPING_MASK GENMASK(10, 8)
-#define LAN2_PHY4_LED_MAP BIT(12)
-#define LAN2_PHY2_LED_MAP BIT(11)
-#define LAN2_PHY1_LED_MAP BIT(10)
-#define LAN2_PHY0_LED_MAP 0
-#define LAN2_PHY3_LED_MAP GENMASK(11, 10)
+#define LAN2_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n))
#define LAN1_LED_MAPPING_MASK GENMASK(6, 4)
-#define LAN1_PHY4_LED_MAP BIT(6)
-#define LAN1_PHY2_LED_MAP BIT(5)
-#define LAN1_PHY1_LED_MAP BIT(4)
-#define LAN1_PHY0_LED_MAP 0
-#define LAN1_PHY3_LED_MAP GENMASK(5, 4)
+#define LAN1_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n))
#define LAN0_LED_MAPPING_MASK GENMASK(2, 0)
-#define LAN0_PHY4_LED_MAP BIT(3)
-#define LAN0_PHY2_LED_MAP BIT(2)
-#define LAN0_PHY1_LED_MAP BIT(1)
-#define LAN0_PHY0_LED_MAP 0
-#define LAN0_PHY3_LED_MAP GENMASK(2, 1)
+#define LAN0_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n))
/* CONF */
#define REG_I2C_SDA_E2 0x001c
@@ -1476,8 +1457,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY1_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1491,8 +1472,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY1_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1506,8 +1487,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY1_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1521,8 +1502,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY1_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(0)
},
.regmap_size = 2,
},
@@ -1540,8 +1521,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY2_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1555,8 +1536,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY2_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1570,8 +1551,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY2_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1585,8 +1566,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY2_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(1)
},
.regmap_size = 2,
},
@@ -1604,8 +1585,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY3_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1619,8 +1600,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY3_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1634,8 +1615,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY3_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1649,8 +1630,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY3_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(2)
},
.regmap_size = 2,
},
@@ -1668,8 +1649,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY4_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1683,8 +1664,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY4_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1698,8 +1679,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY4_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1713,8 +1694,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED0_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY4_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(3)
},
.regmap_size = 2,
},
@@ -1732,8 +1713,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY1_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1747,8 +1728,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY1_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1762,8 +1743,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY1_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(0)
},
.regmap_size = 2,
}, {
@@ -1777,8 +1758,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY1_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(0)
},
.regmap_size = 2,
},
@@ -1796,8 +1777,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY2_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1811,8 +1792,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY2_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1826,8 +1807,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY2_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(1)
},
.regmap_size = 2,
}, {
@@ -1841,8 +1822,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY2_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(1)
},
.regmap_size = 2,
},
@@ -1860,8 +1841,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY3_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1875,8 +1856,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY3_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1890,8 +1871,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY3_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(2)
},
.regmap_size = 2,
}, {
@@ -1905,8 +1886,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY3_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(2)
},
.regmap_size = 2,
},
@@ -1924,8 +1905,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN1_LED_MAPPING_MASK,
- LAN1_PHY4_LED_MAP
+ LAN0_LED_MAPPING_MASK,
+ LAN0_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1939,8 +1920,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN2_LED_MAPPING_MASK,
- LAN2_PHY4_LED_MAP
+ LAN1_LED_MAPPING_MASK,
+ LAN1_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1954,8 +1935,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN3_LED_MAPPING_MASK,
- LAN3_PHY4_LED_MAP
+ LAN2_LED_MAPPING_MASK,
+ LAN2_PHY_LED_MAP(3)
},
.regmap_size = 2,
}, {
@@ -1969,8 +1950,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[1] = {
AIROHA_FUNC_MUX,
REG_LAN_LED1_MAPPING,
- LAN4_LED_MAPPING_MASK,
- LAN4_PHY4_LED_MAP
+ LAN3_LED_MAPPING_MASK,
+ LAN3_PHY_LED_MAP(3)
},
.regmap_size = 2,
},
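Alongside repairing the lane/value pairings, the airoha diff above collapses five per-PHY #defines per lane into one FIELD_PREP_CONST()-based LANx_PHY_LED_MAP(n) that shifts the PHY index into the lane's mask. A simplified userspace stand-in for that field-prep helper, assuming a contiguous mask and the GCC/Clang __builtin_ctz() builtin:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	((~0u >> (31 - (h))) & ~((1u << (l)) - 1))

/* Simplified FIELD_PREP: shift a value into a contiguous bit mask. */
#define field_prep(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define LAN4_LED_MAPPING_MASK	GENMASK(18, 16)
#define LAN4_PHY_LED_MAP(n)	field_prep(LAN4_LED_MAPPING_MASK, (n))

int main(void)
{
	/* PHY index 3 lands in bits 18:16 => 0x30000 */
	printf("0x%x\n", LAN4_PHY_LED_MAP(3));
	return 0;
}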
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 253a0cc57e39..e5a32a0532ee 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
if (meson_pinconf_get_pull(pc, pin) == param)
- arg = 1;
+ arg = 60000;
else
return -EINVAL;
break;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 335744ac8310..79f9c08e5039 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -417,20 +417,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
- unsigned int reg = OUTPUT_EN;
+ unsigned int en_offset = offset;
+ unsigned int reg = OUTPUT_VAL;
unsigned int mask, val, ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
+ val = value ? mask : 0;
- ret = regmap_update_bits(info->regmap, reg, mask, mask);
-
+ ret = regmap_update_bits(info->regmap, reg, mask, val);
if (ret)
return ret;
- reg = OUTPUT_VAL;
- val = value ? mask : 0;
- regmap_update_bits(info->regmap, reg, mask, val);
+ reg = OUTPUT_EN;
+ armada_37xx_update_reg(&reg, &en_offset);
+
+ regmap_update_bits(info->regmap, reg, mask, mask);
return 0;
}
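The armada-37xx fix above reorders direction_output: the output value register is programmed before the output-enable register, so the pin cannot briefly drive a stale level while being switched to output. The ordering, reduced to a sketch (register layout illustrative):

#include <stdint.h>

static void direction_output(volatile uint32_t *val_reg,
			     volatile uint32_t *en_reg,
			     unsigned int bit, int value)
{
	uint32_t mask = 1u << bit;

	if (value)			/* 1: latch the desired level */
		*val_reg |= mask;
	else
		*val_reg &= ~mask;

	*en_reg |= mask;		/* 2: only then start driving */
}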
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b96e6368a956..4d1f41488017 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -382,6 +382,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval, gpinten;
+ bool need_unmask = false;
unsigned long int enabled_interrupts;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
@@ -396,9 +397,6 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
goto unlock;
}
- if (mcp_read(mcp, MCP_INTCAP, &intcap))
- goto unlock;
-
if (mcp_read(mcp, MCP_INTCON, &intcon))
goto unlock;
@@ -408,6 +406,16 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
if (mcp_read(mcp, MCP_DEFVAL, &defval))
goto unlock;
+ /* Mask level interrupts to avoid their immediate reactivation after clearing */
+ if (intcon) {
+ need_unmask = true;
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten & ~intcon))
+ goto unlock;
+ }
+
+ if (mcp_read(mcp, MCP_INTCAP, &intcap))
+ goto unlock;
+
/* This clears the interrupt(configurable on S18) */
if (mcp_read(mcp, MCP_GPIO, &gpio))
goto unlock;
@@ -470,9 +478,18 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
}
}
+ if (need_unmask) {
+ mutex_lock(&mcp->lock);
+ goto unlock;
+ }
+
return IRQ_HANDLED;
unlock:
+ if (need_unmask)
+ if (mcp_write(mcp, MCP_GPINTEN, gpinten))
+ dev_err(mcp->chip.parent, "can't unmask GPINTEN\n");
+
mutex_unlock(&mcp->lock);
return IRQ_HANDLED;
}
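The mcp23s08 change above reorders the handler so that level-triggered sources are masked in GPINTEN before INTCAP/GPIO are read to clear the interrupt, then unmasked on every exit path; otherwise a still-asserted level would re-fire the line the instant it is cleared. The control flow, as a self-contained sketch (the register helpers are stand-ins, not the driver's API):

enum { GPINTEN, INTCON, INTCAP, GPIO, NREGS };

static unsigned int regs[NREGS];

static unsigned int reg_read(int r)		{ return regs[r]; }
static void reg_write(int r, unsigned int v)	{ regs[r] = v; }

static void handle_irq(void)
{
	unsigned int gpinten = reg_read(GPINTEN);
	unsigned int intcon  = reg_read(INTCON);	/* level-mode pins */
	int need_unmask = 0;

	if (intcon) {
		need_unmask = 1;
		reg_write(GPINTEN, gpinten & ~intcon);	/* mask level IRQs */
	}

	(void)reg_read(INTCAP);		/* capture... */
	(void)reg_read(GPIO);		/* ...and clear the interrupt */

	/* ... dispatch to per-pin handlers ... */

	if (need_unmask)
		reg_write(GPINTEN, gpinten);	/* restore original mask */
}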
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 35f47660a56b..a0d63a672539 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -138,10 +138,10 @@ config PINCTRL_MSM8916
Qualcomm TLMM block found on the Qualcomm 8916 platform.
config PINCTRL_MSM8917
- tristate "Qualcomm 8917 pin controller driver"
+ tristate "Qualcomm 8917/8937 pin controller driver"
help
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm TLMM block found on the Qualcomm MSM8917 platform.
+	  Qualcomm TLMM block found on the Qualcomm MSM8917 and MSM8937 platforms.
config PINCTRL_MSM8953
tristate "Qualcomm 8953 pin controller driver"
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 82f0cc43bbf4..0eb816395dc6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -44,7 +44,6 @@
* @pctrl: pinctrl handle.
* @chip: gpiochip handle.
* @desc: pin controller descriptor
- * @restart_nb: restart notifier block.
* @irq: parent irq for the TLMM irq_chip.
* @intr_target_use_scm: route irq to application cpu using scm calls
* @lock: Spinlock to protect register resources as well
@@ -64,7 +63,6 @@ struct msm_pinctrl {
struct pinctrl_dev *pctrl;
struct gpio_chip chip;
struct pinctrl_desc desc;
- struct notifier_block restart_nb;
int irq;
@@ -1471,10 +1469,9 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return 0;
}
-static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
- void *data)
+static int msm_ps_hold_restart(struct sys_off_data *data)
{
- struct msm_pinctrl *pctrl = container_of(nb, struct msm_pinctrl, restart_nb);
+ struct msm_pinctrl *pctrl = data->cb_data;
writel(0, pctrl->regs[0] + PS_HOLD_OFFSET);
mdelay(1000);
@@ -1485,7 +1482,11 @@ static struct msm_pinctrl *poweroff_pctrl;
static void msm_ps_hold_poweroff(void)
{
- msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+ struct sys_off_data data = {
+ .cb_data = poweroff_pctrl,
+ };
+
+ msm_ps_hold_restart(&data);
}
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
@@ -1495,9 +1496,11 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
- pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
- pctrl->restart_nb.priority = 128;
- if (register_restart_handler(&pctrl->restart_nb))
+ if (devm_register_sys_off_handler(pctrl->dev,
+ SYS_OFF_MODE_RESTART,
+ 128,
+ msm_ps_hold_restart,
+ pctrl))
dev_err(pctrl->dev,
"failed to setup restart handler.\n");
poweroff_pctrl = pctrl;
@@ -1599,8 +1602,6 @@ void msm_pinctrl_remove(struct platform_device *pdev)
struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
-
- unregister_restart_handler(&pctrl->restart_nb);
}
EXPORT_SYMBOL(msm_pinctrl_remove);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index cff137bb3b23..350636807b07 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -539,6 +539,7 @@ enum msm8917_functions {
msm_mux_webcam_standby,
msm_mux_wsa_io,
msm_mux_wsa_irq,
+ msm_mux_wsa_reset,
msm_mux__,
};
@@ -1123,6 +1124,10 @@ static const char * const wsa_io_groups[] = {
"gpio94", "gpio95",
};
+static const char * const wsa_reset_groups[] = {
+ "gpio96",
+};
+
static const char * const blsp_spi8_groups[] = {
"gpio96", "gpio97", "gpio98", "gpio99",
};
@@ -1378,6 +1383,7 @@ static const struct pinfunction msm8917_functions[] = {
MSM_PIN_FUNCTION(webcam_standby),
MSM_PIN_FUNCTION(wsa_io),
MSM_PIN_FUNCTION(wsa_irq),
+ MSM_PIN_FUNCTION(wsa_reset),
};
static const struct msm_pingroup msm8917_groups[] = {
@@ -1616,5 +1622,5 @@ static void __exit msm8917_pinctrl_exit(void)
}
module_exit(msm8917_pinctrl_exit);
-MODULE_DESCRIPTION("Qualcomm msm8917 pinctrl driver");
+MODULE_DESCRIPTION("Qualcomm msm8917/msm8937 pinctrl driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index 1af11cd95fb0..b94fb4ee0ec3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -46,7 +46,9 @@
.out_bit = 1, \
.intr_enable_bit = 0, \
.intr_status_bit = 0, \
- .intr_target_bit = 5, \
+ .intr_wakeup_present_bit = 6, \
+ .intr_wakeup_enable_bit = 7, \
+ .intr_target_bit = 8, \
.intr_target_kpss_val = 3, \
.intr_raw_status_bit = 4, \
.intr_polarity_bit = 1, \
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 8b36161c7c50..3b5812963850 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -246,6 +246,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
int ret;
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
+ if (!chip.label)
+ return -ENOMEM;
+
chip.parent = priv->dev;
chip.ngpio = priv->npins;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index d1da7f53fc60..c72e250f4a15 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -318,6 +318,7 @@ struct rzg2l_pinctrl_pin_settings {
* @pmc: PMC registers cache
* @pfc: PFC registers cache
* @iolh: IOLH registers cache
+ * @pupd: PUPD registers cache
* @ien: IEN registers cache
* @sd_ch: SD_CH registers cache
* @eth_poc: ET_POC registers cache
@@ -331,6 +332,7 @@ struct rzg2l_pinctrl_reg_cache {
u32 *pfc;
u32 *iolh[2];
u32 *ien[2];
+ u32 *pupd[2];
u8 sd_ch[2];
u8 eth_poc[2];
u8 eth_mode;
@@ -2712,6 +2714,11 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
if (!cache->ien[i])
return -ENOMEM;
+ cache->pupd[i] = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->pupd[i]),
+ GFP_KERNEL);
+ if (!cache->pupd[i])
+ return -ENOMEM;
+
/* Allocate dedicated cache. */
dedicated_cache->iolh[i] = devm_kcalloc(pctrl->dev, n_dedicated_pins,
sizeof(*dedicated_cache->iolh[i]),
@@ -2955,7 +2962,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
for (u32 port = 0; port < nports; port++) {
- bool has_iolh, has_ien;
+ bool has_iolh, has_ien, has_pupd;
u32 off, caps;
u8 pincnt;
u64 cfg;
@@ -2967,6 +2974,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
caps = FIELD_GET(PIN_CFG_MASK, cfg);
has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
has_ien = !!(caps & PIN_CFG_IEN);
+ has_pupd = !!(caps & PIN_CFG_PUPD);
if (suspend)
RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
@@ -2985,6 +2993,15 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
}
}
+ if (has_pupd) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off),
+ cache->pupd[0][port]);
+ if (pincnt >= 4) {
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PUPD(off),
+ cache->pupd[1][port]);
+ }
+ }
+
RZG2L_PCTRL_REG_ACCESS16(suspend, pctrl->base + PM(off), cache->pm[port]);
RZG2L_PCTRL_REG_ACCESS8(suspend, pctrl->base + P(off), cache->p[port]);
diff --git a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
index 57f2674e75d6..84b4850771ce 100644
--- a/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
+++ b/drivers/pinctrl/sophgo/pinctrl-cv18xx.c
@@ -574,10 +574,10 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
struct cv1800_pin *pin,
unsigned long *configs,
unsigned int num_configs,
- u32 *value)
+ u32 *value, u32 *mask)
{
int i;
- u32 v = 0;
+ u32 v = 0, m = 0;
enum cv1800_pin_io_type type;
int ret;
@@ -596,10 +596,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
case PIN_CONFIG_BIAS_PULL_DOWN:
v &= ~PIN_IO_PULLDOWN;
v |= FIELD_PREP(PIN_IO_PULLDOWN, arg);
+ m |= PIN_IO_PULLDOWN;
break;
case PIN_CONFIG_BIAS_PULL_UP:
v &= ~PIN_IO_PULLUP;
v |= FIELD_PREP(PIN_IO_PULLUP, arg);
+ m |= PIN_IO_PULLUP;
break;
case PIN_CONFIG_DRIVE_STRENGTH_UA:
ret = cv1800_pinctrl_oc2reg(pctrl, pin, arg);
@@ -607,6 +609,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
return ret;
v &= ~PIN_IO_DRIVE;
v |= FIELD_PREP(PIN_IO_DRIVE, ret);
+ m |= PIN_IO_DRIVE;
break;
case PIN_CONFIG_INPUT_SCHMITT_UV:
ret = cv1800_pinctrl_schmitt2reg(pctrl, pin, arg);
@@ -614,6 +617,7 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
return ret;
v &= ~PIN_IO_SCHMITT;
v |= FIELD_PREP(PIN_IO_SCHMITT, ret);
+ m |= PIN_IO_SCHMITT;
break;
case PIN_CONFIG_POWER_SOURCE:
/* Ignore power source as it is always fixed */
@@ -621,10 +625,12 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
case PIN_CONFIG_SLEW_RATE:
v &= ~PIN_IO_OUT_FAST_SLEW;
v |= FIELD_PREP(PIN_IO_OUT_FAST_SLEW, arg);
+ m |= PIN_IO_OUT_FAST_SLEW;
break;
case PIN_CONFIG_BIAS_BUS_HOLD:
v &= ~PIN_IO_BUS_HOLD;
v |= FIELD_PREP(PIN_IO_BUS_HOLD, arg);
+ m |= PIN_IO_BUS_HOLD;
break;
default:
return -ENOTSUPP;
@@ -632,17 +638,19 @@ static int cv1800_pinconf_compute_config(struct cv1800_pinctrl *pctrl,
}
*value = v;
+ *mask = m;
return 0;
}
static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
unsigned int pin_id,
- u32 value)
+ u32 value, u32 mask)
{
struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
unsigned long flags;
void __iomem *addr;
+ u32 reg;
if (!pin)
return -EINVAL;
@@ -650,7 +658,10 @@ static int cv1800_pin_set_config(struct cv1800_pinctrl *pctrl,
addr = cv1800_pinctrl_get_component_addr(pctrl, &pin->conf);
raw_spin_lock_irqsave(&pctrl->lock, flags);
- writel(value, addr);
+ reg = readl(addr);
+ reg &= ~mask;
+ reg |= value;
+ writel(reg, addr);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -662,16 +673,17 @@ static int cv1800_pconf_set(struct pinctrl_dev *pctldev,
{
struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
struct cv1800_pin *pin = cv1800_get_pin(pctrl, pin_id);
- u32 value;
+ u32 value, mask;
if (!pin)
return -ENODEV;
if (cv1800_pinconf_compute_config(pctrl, pin,
- configs, num_configs, &value))
+ configs, num_configs,
+ &value, &mask))
return -ENOTSUPP;
- return cv1800_pin_set_config(pctrl, pin_id, value);
+ return cv1800_pin_set_config(pctrl, pin_id, value, mask);
}
static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
@@ -682,7 +694,7 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
struct cv1800_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct group_desc *group;
const struct cv1800_pin_mux_config *pinmuxs;
- u32 value;
+ u32 value, mask;
int i;
group = pinctrl_generic_get_group(pctldev, gsel);
@@ -692,11 +704,12 @@ static int cv1800_pconf_group_set(struct pinctrl_dev *pctldev,
pinmuxs = group->data;
if (cv1800_pinconf_compute_config(pctrl, pinmuxs[0].pin,
- configs, num_configs, &value))
+ configs, num_configs,
+ &value, &mask))
return -ENOTSUPP;
for (i = 0; i < group->grp.npins; i++)
- cv1800_pin_set_config(pctrl, group->grp.pins[i], value);
+ cv1800_pin_set_config(pctrl, group->grp.pins[i], value, mask);
return 0;
}
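The cv18xx rework above stops writing whole-register values: compute_config now also returns a mask of the bits it actually configured, and set_config performs a read-modify-write under the lock so unrelated fields survive. The discipline, reduced to a single field in userspace:

#include <stdint.h>

static uint32_t ioreg;	/* stands in for the memory-mapped register */

static void pin_set_config(uint32_t value, uint32_t mask)
{
	uint32_t v = ioreg;	/* readl() in the driver */

	v &= ~mask;		/* clear only the configured fields */
	v |= value & mask;
	ioreg = v;		/* writel() in the driver */
}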
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 3b046450bd3f..edcc78ebce45 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -278,8 +278,8 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
- unsigned int offset)
+static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev,
+ unsigned int offset)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
unsigned int group, num_pins, j;
@@ -292,12 +292,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *
continue;
for (j = 0; j < num_pins; j++) {
if (offset == pins[j])
- return &pmx->soc->groups[group];
+ return group;
}
}
- dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset);
- return NULL;
+ return -EINVAL;
+}
+
+static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ int group_index)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (group_index < 0 || group_index >= pmx->soc->ngroups)
+ return NULL;
+
+ return &pmx->soc->groups[group_index];
+}
+
+static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev,
+ unsigned int offset,
+ int group_index)
+{
+ struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ if (group_index < 0)
+ return NULL;
+
+ return &pmx->pingroup_configs[group_index];
}
static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
@@ -306,12 +329,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *group;
+ struct tegra_pingroup_config *config;
+ int group_index;
u32 value;
if (!pmx->soc->sfsel_in_mux)
return 0;
- group = tegra_pinctrl_get_group(pctldev, offset);
+ group_index = tegra_pinctrl_get_group_index(pctldev, offset);
+ group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group)
return -EINVAL;
@@ -319,7 +345,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
if (group->mux_reg < 0 || group->sfsel_bit < 0)
return -EINVAL;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
+ if (!config)
+ return -EINVAL;
value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
+ config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0;
value &= ~BIT(group->sfsel_bit);
pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
@@ -332,12 +362,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tegra_pingroup *group;
+ struct tegra_pingroup_config *config;
+ int group_index;
u32 value;
if (!pmx->soc->sfsel_in_mux)
return;
- group = tegra_pinctrl_get_group(pctldev, offset);
+ group_index = tegra_pinctrl_get_group_index(pctldev, offset);
+ group = tegra_pinctrl_get_group(pctldev, offset, group_index);
if (!group)
return;
@@ -345,8 +378,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
if (group->mux_reg < 0 || group->sfsel_bit < 0)
return;
+ config = tegra_pinctrl_get_group_config(pctldev, offset, group_index);
+ if (!config)
+ return;
value = pmx_readl(pmx, group->mux_bank, group->mux_reg);
- value |= BIT(group->sfsel_bit);
+ if (config->is_sfsel)
+ value |= BIT(group->sfsel_bit);
pmx_writel(pmx, value, group->mux_bank, group->mux_reg);
}
@@ -791,6 +828,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
pmx->dev = &pdev->dev;
pmx->soc = soc_data;
+ pmx->pingroup_configs = devm_kcalloc(&pdev->dev,
+ pmx->soc->ngroups, sizeof(*pmx->pingroup_configs),
+ GFP_KERNEL);
+ if (!pmx->pingroup_configs)
+ return -ENOMEM;
+
/*
* Each mux group will appear in 4 functions' list of groups.
* This over-allocates slightly, since not all groups are mux groups.
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index b3289bdf727d..b97136685f7a 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -8,6 +8,10 @@
#ifndef __PINMUX_TEGRA_H__
#define __PINMUX_TEGRA_H__
+struct tegra_pingroup_config {
+ bool is_sfsel;
+};
+
struct tegra_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
@@ -21,6 +25,8 @@ struct tegra_pmx {
int nbanks;
void __iomem **regs;
u32 *backup_regs;
+ /* Array of size soc->ngroups */
+ struct tegra_pingroup_config *pingroup_configs;
};
enum tegra_pinconf_param {
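The tegra pinctrl change above stops forcing SFSEL back on at GPIO release: request_enable records whether the pad was in special-function mode, and disable_free restores exactly that state. A sketch of the save/restore (types and names hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct group_config {
	bool was_sfsel;		/* state captured at gpio_request_enable */
};

static void gpio_request_enable(uint32_t *mux_reg, unsigned int sfsel_bit,
				struct group_config *cfg)
{
	cfg->was_sfsel = *mux_reg & (1u << sfsel_bit);	/* remember */
	*mux_reg &= ~(1u << sfsel_bit);			/* force GPIO mode */
}

static void gpio_disable_free(uint32_t *mux_reg, unsigned int sfsel_bit,
			      const struct group_config *cfg)
{
	if (cfg->was_sfsel)				/* restore as found */
		*mux_reg |= 1u << sfsel_bit;
}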
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index 7d10d4462a45..d6f7a62d55b5 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -7,7 +7,7 @@ config AMD_HSMP
tristate
menu "AMD HSMP Driver"
- depends on AMD_NB || COMPILE_TEST
+ depends on AMD_NODE || COMPILE_TEST
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 444b43be35a2..eaae044e4f82 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/device.h>
@@ -24,11 +23,12 @@
#include <uapi/asm-generic/errno-base.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
-#define DRIVER_NAME "amd_hsmp"
+#define DRIVER_NAME "hsmp_acpi"
#define DRIVER_VERSION "2.3"
-#define ACPI_HSMP_DEVICE_HID "AMDI0097"
/* These are the strings specified in ACPI table */
#define MSG_IDOFF_STR "MsgIdOffset"
@@ -321,8 +321,8 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return -ENODEV;
hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 03164e30b3a5..a3ac09a90de4 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -8,7 +8,6 @@
*/
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index e852f0a947e4..d58d4f0c20d5 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -21,10 +21,9 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
-#define MAX_AMD_SOCKETS 8
-
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
+#define ACPI_HSMP_DEVICE_HID "AMDI0097"
struct hsmp_mbaddr_info {
u32 base_addr;
@@ -41,7 +40,6 @@ struct hsmp_socket {
void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
- struct pci_dev *root;
struct device *dev;
u16 sock_ind;
int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw);
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index 02ca85762b68..81931e808bbc 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -10,14 +10,17 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
+#include <linux/acpi.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -34,28 +37,12 @@
#define SMN_HSMP_MSG_RESP 0x0010980
#define SMN_HSMP_MSG_DATA 0x00109E0
-#define HSMP_INDEX_REG 0xc4
-#define HSMP_DATA_REG 0xc8
-
static struct hsmp_plat_device *hsmp_pdev;
static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
- int ret;
-
- if (!sock->root)
- return -ENODEV;
-
- ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
- sock->mbinfo.base_addr + offset);
- if (ret)
- return ret;
-
- ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
-
- return ret;
+ return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write);
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
@@ -95,7 +82,12 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* Static array of 8 + 1(for NULL) elements is created below
* to create sysfs groups for sockets.
* is_bin_visible function is used to show / hide the necessary groups.
+ *
+ * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes,
+ * then the attributes and groups below must be adjusted.
*/
+static_assert(MAX_AMD_NUM_NODES == 8);
+
#define HSMP_BIN_ATTR(index, _list) \
static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
@@ -159,10 +151,7 @@ static int init_platform_device(struct device *dev)
int ret, i;
for (i = 0; i < hsmp_pdev->num_sockets; i++) {
- if (!node_to_amd_nb(i))
- return -ENODEV;
sock = &hsmp_pdev->sock[i];
- sock->root = node_to_amd_nb(i)->root;
sock->sock_ind = i;
sock->dev = dev;
sock->mbinfo.base_addr = SMN_HSMP_BASE;
@@ -278,7 +267,7 @@ static bool legacy_hsmp_support(void)
}
case 0x1A:
switch (boot_cpu_data.x86_model) {
- case 0x00 ... 0x1F:
+ case 0x00 ... 0x0F:
return true;
default:
return false;
@@ -300,16 +289,19 @@ static int __init hsmp_plt_init(void)
return ret;
}
+ if (acpi_dev_present(ACPI_HSMP_DEVICE_HID, NULL, -1))
+ return -ENODEV;
+
hsmp_pdev = get_hsmp_pdev();
if (!hsmp_pdev)
return -ENOMEM;
/*
- * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ * amd_num_nodes() returns number of SMN/DF interfaces present in the system
* if we have N SMN/DF interfaces that ideally means N sockets
*/
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return ret;
ret = platform_driver_register(&amd_hsmp_driver);
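
An aside on the access pattern being removed here: amd_hsmp_pci_rdwr() used a classic index/data register pair, where one config-space write selects the SMN address and a second access moves the data; amd_smn_hsmp_rdwr() now hides that behind a common helper. A minimal user-space model of the pattern (the register variable, array size, and address below are invented for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for the index register and the SMN space. */
    static uint32_t index_reg;
    static uint32_t smn_space[1024];

    static void smn_write(uint32_t addr, uint32_t val)
    {
        index_reg = addr;                   /* write INDEX: select address */
        smn_space[index_reg % 1024] = val;  /* write DATA: move the value */
    }

    static uint32_t smn_read(uint32_t addr)
    {
        index_reg = addr;                   /* write INDEX: select address */
        return smn_space[index_reg % 1024]; /* read DATA: fetch the value */
    }

    int main(void)
    {
        smn_write(0x10534, 42);
        printf("%u\n", smn_read(0x10534)); /* 42 */
        return 0;
    }
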
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index b4f49720c87f..2e3f6fc67c56 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -217,6 +217,13 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
}
},
+ {
+ .ident = "MECHREVO Wujie 14X (GX4HRXL)",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index e6124498b195..cfd1c37cf6b6 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -724,10 +724,9 @@ static void amd_pmc_s2idle_check(void)
struct smu_metrics table;
int rc;
- /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
- if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
- table.s0i3_last_entry_status)
- usleep_range(10000, 20000);
+ /* Avoid triggering OVP */
+ if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status)
+ msleep(2500);
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 14b99d8b63d2..d3bd12ad036a 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -334,6 +334,11 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
return 0;
}
+static inline bool amd_pmf_pb_valid(struct amd_pmf_dev *dev)
+{
+ return memchr_inv(dev->policy_buf, 0xff, dev->policy_sz);
+}
+
#ifdef CONFIG_AMD_PMF_DEBUG
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev)
{
@@ -361,12 +366,22 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
dev->policy_buf = new_policy_buf;
dev->policy_sz = length;
+ if (!amd_pmf_pb_valid(dev)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
if (ret < 0)
- return ret;
+ goto cleanup;
return length;
+
+cleanup:
+ kfree(dev->policy_buf);
+ dev->policy_buf = NULL;
+ return ret;
}
static const struct file_operations pb_fops = {
@@ -528,6 +543,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
+ if (!amd_pmf_pb_valid(dev)) {
+ dev_info(dev->dev, "No Smart PC policy present\n");
+ ret = -EINVAL;
+ goto err_free_policy;
+ }
+
amd_pmf_hex_dump_pb(dev);
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
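
About the new amd_pmf_pb_valid() check: a policy buffer that reads back as all 0xff bytes is erased-flash filler, and memchr_inv() returns NULL exactly when every byte matches, so the helper is truthy only for a real policy. A user-space equivalent, for illustration:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* True if at least one byte differs from 0xff, i.e. the policy
     * binary is not just erased-flash filler. */
    static bool pb_valid(const unsigned char *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            if (buf[i] != 0xff)
                return true;
        return false;
    }

    int main(void)
    {
        unsigned char erased[4] = { 0xff, 0xff, 0xff, 0xff };
        unsigned char policy[4] = { 0xff, 0x00, 0xff, 0xff };

        printf("%d %d\n", pb_valid(erased, 4), pb_valid(policy, 4)); /* 0 1 */
        return 0;
    }
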
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 38ef778e8c19..47cc766624d7 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -304,6 +304,7 @@ struct asus_wmi {
u32 kbd_rgb_dev;
bool kbd_rgb_state_available;
+ bool oobe_state_available;
u8 throttle_thermal_policy_mode;
u32 throttle_thermal_policy_dev;
@@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) {
+ if (asus->oobe_state_available) {
/*
* Disable OOBE state, so that e.g. the keyboard backlight
* works.
@@ -4723,6 +4724,7 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE);
asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
&& dmi_check_system(asus_ally_mcu_quirk);
@@ -4777,7 +4779,8 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_leds;
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) ==
+ (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;
if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
@@ -4970,6 +4973,13 @@ static int asus_hotk_restore(struct device *device)
}
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
+ if (asus->oobe_state_available) {
+ /*
+ * Disable OOBE state, so that e.g. the keyboard backlight
+ * works.
+ */
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL);
+ }
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
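
The WLAN fix above turns an any-bit test into a both-bits test: the old expression was true when either the presence or the user-control bit was set, the new one only when both are. A compilable sketch (the bit values are illustrative, not the real ASUS_WMI_DSTS_* definitions):

    #include <assert.h>
    #include <stdint.h>

    #define PRESENCE_BIT (1u << 0)  /* illustrative values only */
    #define USER_BIT     (1u << 1)

    int main(void)
    {
        uint32_t mask = PRESENCE_BIT | USER_BIT;
        uint32_t result = PRESENCE_BIT;     /* only one of the two bits set */

        assert((result & mask) != 0);       /* old test: true (wrongly) */
        assert((result & mask) != mask);    /* new test: false, as wanted */
        return 0;
    }
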
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 1426ea8e4f19..1a711d395d2d 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -252,6 +252,15 @@ static const struct dmi_system_id alienware_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Alienware m15 R7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"),
+ },
+ .driver_data = &quirk_x_series,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Alienware m16 R1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
index 230e6ee96636..d8f1bf5e58a0 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
@@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj,
int length;
length = strlen(buf);
- if (buf[length-1] == '\n')
+ if (length && buf[length - 1] == '\n')
length--;
/* firmware does verification of min/max password length,
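
The added length check matters because sysfs can hand the store callback an empty string, in which case buf[length - 1] would read one byte before the buffer. A small demonstration of the guarded version:

    #include <stdio.h>
    #include <string.h>

    static size_t strip_newline(const char *buf)
    {
        size_t length = strlen(buf);

        /* Without the length check, "" would index buf[-1]. */
        if (length && buf[length - 1] == '\n')
            length--;
        return length;
    }

    int main(void)
    {
        printf("%zu %zu\n", strip_newline("secret\n"), strip_newline("")); /* 6 0 */
        return 0;
    }
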
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index a0eae24ca9e6..162809140f68 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -17,13 +17,13 @@
/*
* fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
* features made available on a range of Fujitsu laptops including the
- * P2xxx/P5xxx/S6xxx/S7xxx series.
+ * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series.
*
* This driver implements a vendor-specific backlight control interface for
* Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
* laptops.
*
- * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
+ * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and
* P8010. It should work on most P-series and S-series Lifebooks, but
* YMMV.
*
@@ -107,7 +107,11 @@
#define KEY2_CODE 0x411
#define KEY3_CODE 0x412
#define KEY4_CODE 0x413
-#define KEY5_CODE 0x420
+#define KEY5_CODE 0x414
+#define KEY6_CODE 0x415
+#define KEY7_CODE 0x416
+#define KEY8_CODE 0x417
+#define KEY9_CODE 0x420
/* Hotkey ringbuffer limits */
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
@@ -560,7 +564,7 @@ static const struct key_entry keymap_default[] = {
{ KE_KEY, KEY2_CODE, { KEY_PROG2 } },
{ KE_KEY, KEY3_CODE, { KEY_PROG3 } },
{ KE_KEY, KEY4_CODE, { KEY_PROG4 } },
- { KE_KEY, KEY5_CODE, { KEY_RFKILL } },
+ { KE_KEY, KEY9_CODE, { KEY_RFKILL } },
/* Soft keys read from status flags */
{ KE_KEY, FLAG_RFKILL, { KEY_RFKILL } },
{ KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } },
@@ -584,6 +588,18 @@ static const struct key_entry keymap_p8010[] = {
{ KE_END, 0 }
};
+static const struct key_entry keymap_s2110[] = {
+ { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */
+ { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */
+ { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */
+ { KE_KEY, KEY5_CODE, { KEY_STOPCD } },
+ { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } },
+ { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } },
+ { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } },
+ { KE_END, 0 }
+};
+
static const struct key_entry *keymap = keymap_default;
static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
@@ -621,6 +637,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
},
.driver_data = (void *)keymap_p8010
},
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu LifeBook S2110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"),
+ },
+ .driver_data = (void *)keymap_s2110
+ },
{}
};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 30bd366d7b58..b740c7bb8101 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1308,6 +1308,16 @@ static const struct key_entry ideapad_keymap[] = {
/* Specific to some newer models */
{ KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } },
{ KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } },
+ /* Star- (User Assignable Key) */
+ { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
+ /* Eye */
+ { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } },
+ /* Performance toggle, also Fn+Q; handled inside ideapad_wmi_notify() */
+ { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } },
+ /* shift + prtsc */
+ { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } },
+ { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } },
+ { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } },
{ KE_END },
};
@@ -2094,6 +2104,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data)
dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n",
data->integer.value);
+ /* performance button triggered by 0x3d */
+ if (data->integer.value == 0x3d && priv->dytc) {
+ platform_profile_cycle();
+ break;
+ }
+
/* 0x02 FnLock, 0x03 Esc */
if (data->integer.value == 0x02 || data->integer.value == 0x03)
ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 88a1a9ff2f34..0b5e43444ed6 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -44,16 +44,17 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
static const struct acpi_device_id intel_hid_ids[] = {
- {"INT33D5", 0},
- {"INTC1051", 0},
- {"INTC1054", 0},
- {"INTC1070", 0},
- {"INTC1076", 0},
- {"INTC1077", 0},
- {"INTC1078", 0},
- {"INTC107B", 0},
- {"INTC10CB", 0},
- {"", 0},
+ { "INT33D5" },
+ { "INTC1051" },
+ { "INTC1054" },
+ { "INTC1070" },
+ { "INTC1076" },
+ { "INTC1077" },
+ { "INTC1078" },
+ { "INTC107B" },
+ { "INTC10CB" },
+ { "INTC10CC" },
+ { }
};
MODULE_DEVICE_TABLE(acpi, intel_hid_ids);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index 40bbf8e45fa4..bdee5d00f30b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -146,15 +146,13 @@ static int uncore_event_cpu_online(unsigned int cpu)
{
struct uncore_data *data;
int target;
+ int ret;
/* Check if there is an online cpu in the package for uncore MSR */
target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
- /* Use this CPU on this die as a control CPU */
- cpumask_set_cpu(cpu, &uncore_cpu_mask);
-
data = uncore_get_instance(cpu);
if (!data)
return 0;
@@ -163,7 +161,14 @@ static int uncore_event_cpu_online(unsigned int cpu)
data->die_id = topology_die_id(cpu);
data->domain_id = UNCORE_DOMAIN_ID_INVALID;
- return uncore_freq_add_entry(data, cpu);
+ ret = uncore_freq_add_entry(data, cpu);
+ if (ret)
+ return ret;
+
+ /* Use this CPU on this die as a control CPU */
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ return 0;
}
static int uncore_event_cpu_offline(unsigned int cpu)
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 323316ac6783..e4b3b20d03f3 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1065,8 +1065,8 @@ static ssize_t current_value_store(struct kobject *kobj,
ret = -EINVAL;
goto out;
}
- set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
- new_setting, tlmi_priv.pwd_admin->signature);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
+ new_setting, tlmi_priv.pwd_admin->signature);
if (!set_str) {
ret = -ENOMEM;
goto out;
@@ -1096,7 +1096,7 @@ static ssize_t current_value_store(struct kobject *kobj,
goto out;
}
- set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
new_setting);
if (!set_str) {
ret = -ENOMEM;
@@ -1124,11 +1124,11 @@ static ssize_t current_value_store(struct kobject *kobj,
}
if (auth_str)
- set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->display_name,
- new_setting, auth_str);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s,%s", setting->name,
+ new_setting, auth_str);
else
- set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
- new_setting);
+ set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->name,
+ new_setting);
if (!set_str) {
ret = -ENOMEM;
goto out;
@@ -1633,9 +1633,6 @@ static int tlmi_analyze(void)
continue;
}
- /* It is not allowed to have '/' for file name. Convert it into '\'. */
- strreplace(item, '/', '\\');
-
/* Remove the value part */
strreplace(item, ',', '\0');
@@ -1647,11 +1644,16 @@ static int tlmi_analyze(void)
goto fail_clear_attr;
}
setting->index = i;
+
+ strscpy(setting->name, item);
+ /* It is not allowed to have '/' for file name. Convert it into '\'. */
+ strreplace(item, '/', '\\');
strscpy(setting->display_name, item);
+
/* If BIOS selections supported, load those */
if (tlmi_priv.can_get_bios_selections) {
- ret = tlmi_get_bios_selections(setting->display_name,
- &setting->possible_values);
+ ret = tlmi_get_bios_selections(setting->name,
+ &setting->possible_values);
if (ret || !setting->possible_values)
pr_info("Error retrieving possible values for %d : %s\n",
i, setting->display_name);
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index f267d8b46957..95a3d935edaa 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -88,6 +88,7 @@ struct tlmi_pwd_setting {
struct tlmi_attr_setting {
struct kobject kobj;
int index;
+ char name[TLMI_SETTINGS_MAXLEN];
char display_name[TLMI_SETTINGS_MAXLEN];
char *possible_values;
};
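
The think-lmi change keeps two copies of each BIOS attribute string: name holds the raw item for WMI calls, while display_name is the '/'-free variant that is legal as a sysfs file name. A toy illustration (the item string and the strreplace() stand-in are invented here):

    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for the kernel's strreplace(). */
    static void strreplace(char *s, char old, char new)
    {
        for (; *s; s++)
            if (*s == old)
                *s = new;
    }

    int main(void)
    {
        char name[] = "Intel(R) SpeedStep(TM) w/ C-states"; /* invented item */
        char display_name[sizeof(name)];

        strcpy(display_name, name);          /* raw copy for firmware calls */
        strreplace(display_name, '/', '\\'); /* sysfs-safe file name */

        printf("wmi:   %s\nsysfs: %s\n", name, display_name);
        return 0;
    }
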
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2ff38ca9ddb4..d0376ee1f8ce 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -232,6 +232,7 @@ enum tpacpi_hkey_event_t {
/* Thermal events */
TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
+ TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed */
TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */
@@ -3780,6 +3781,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, bool *send_acpi_ev)
pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+ case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE:
+ pr_debug("Battery Info: battery charge threshold changed\n");
+ /* User changed charging threshold. No action needed */
+ return true;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
@@ -11481,6 +11486,8 @@ static int __must_check __init get_thinkpad_model_data(
tp->vendor = PCI_VENDOR_ID_IBM;
else if (dmi_name_in_vendors("LENOVO"))
tp->vendor = PCI_VENDOR_ID_LENOVO;
+ else if (dmi_name_in_vendors("NEC"))
+ tp->vendor = PCI_VENDOR_ID_LENOVO;
else
return 0;
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 3e5fa3b6e2fd..278c6d151dc4 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -180,6 +180,18 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&peaq_c1010_info,
},
{
+ /* Vexia Edu Atla 10 tablet 5V version */
+ .matches = {
+ /* Having all 3 of these not set is somewhat unique */
+ DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"),
+ },
+ .driver_data = (void *)&vexia_edu_atla10_5v_info,
+ },
+ {
/* Vexia Edu Atla 10 tablet 9V version */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -187,7 +199,7 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
/* Above strings are too generic, also match on BIOS date */
DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
},
- .driver_data = (void *)&vexia_edu_atla10_info,
+ .driver_data = (void *)&vexia_edu_atla10_9v_info,
},
{
/* Whitelabel (sold as various brands) TM800A550L */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index 1d93d9edb23f..f7bd9f863c85 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -599,62 +599,122 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
};
/*
- * Vexia EDU ATLA 10 tablet, Android 4.2 / 4.4 + Guadalinex Ubuntu tablet
+ * Vexia EDU ATLA 10 tablet 5V, Android 4.4 + Guadalinex Ubuntu tablet
+ * distributed to schools in the Spanish Andalucía region.
+ */
+static const struct property_entry vexia_edu_atla10_5v_touchscreen_props[] = {
+ PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
+ PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
+ { }
+};
+
+static const struct software_node vexia_edu_atla10_5v_touchscreen_node = {
+ .properties = vexia_edu_atla10_5v_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info vexia_edu_atla10_5v_i2c_clients[] __initconst = {
+ {
+ /* kxcjk1013 accelerometer */
+ .board_info = {
+ .type = "kxcjk1013",
+ .addr = 0x0f,
+ .dev_name = "kxcjk1013",
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ }, {
+ /* touchscreen controller */
+ .board_info = {
+ .type = "hid-over-i2c",
+ .addr = 0x38,
+ .dev_name = "FTSC1000",
+ .swnode = &vexia_edu_atla10_5v_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x44,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }
+};
+
+static struct gpiod_lookup_table vexia_edu_atla10_5v_ft5416_gpios = {
+ .dev_id = "i2c-FTSC1000",
+ .table = {
+ GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const vexia_edu_atla10_5v_gpios[] = {
+ &vexia_edu_atla10_5v_ft5416_gpios,
+ NULL
+};
+
+const struct x86_dev_info vexia_edu_atla10_5v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_5v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_5v_i2c_clients),
+ .gpiod_lookup_tables = vexia_edu_atla10_5v_gpios,
+};
+
+/*
+ * Vexia EDU ATLA 10 tablet 9V, Android 4.2 + Guadalinex Ubuntu tablet
* distributed to schools in the Spanish Andalucía region.
*/
static const char * const crystal_cove_pwrsrc_psy[] = { "crystal_cove_pwrsrc" };
-static const struct property_entry vexia_edu_atla10_ulpmc_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_ulpmc_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", crystal_cove_pwrsrc_psy),
{ }
};
-static const struct software_node vexia_edu_atla10_ulpmc_node = {
- .properties = vexia_edu_atla10_ulpmc_props,
+static const struct software_node vexia_edu_atla10_9v_ulpmc_node = {
+ .properties = vexia_edu_atla10_9v_ulpmc_props,
};
-static const char * const vexia_edu_atla10_accel_mount_matrix[] = {
+static const char * const vexia_edu_atla10_9v_accel_mount_matrix[] = {
"0", "-1", "0",
"1", "0", "0",
"0", "0", "1"
};
-static const struct property_entry vexia_edu_atla10_accel_props[] = {
- PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_accel_mount_matrix),
+static const struct property_entry vexia_edu_atla10_9v_accel_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", vexia_edu_atla10_9v_accel_mount_matrix),
{ }
};
-static const struct software_node vexia_edu_atla10_accel_node = {
- .properties = vexia_edu_atla10_accel_props,
+static const struct software_node vexia_edu_atla10_9v_accel_node = {
+ .properties = vexia_edu_atla10_9v_accel_props,
};
-static const struct property_entry vexia_edu_atla10_touchscreen_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_touchscreen_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
{ }
};
-static const struct software_node vexia_edu_atla10_touchscreen_node = {
- .properties = vexia_edu_atla10_touchscreen_props,
+static const struct software_node vexia_edu_atla10_9v_touchscreen_node = {
+ .properties = vexia_edu_atla10_9v_touchscreen_props,
};
-static const struct property_entry vexia_edu_atla10_pmic_props[] = {
+static const struct property_entry vexia_edu_atla10_9v_pmic_props[] = {
PROPERTY_ENTRY_BOOL("linux,register-pwrsrc-power_supply"),
{ }
};
-static const struct software_node vexia_edu_atla10_pmic_node = {
- .properties = vexia_edu_atla10_pmic_props,
+static const struct software_node vexia_edu_atla10_9v_pmic_node = {
+ .properties = vexia_edu_atla10_9v_pmic_props,
};
-static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initconst = {
+static const struct x86_i2c_client_info vexia_edu_atla10_9v_i2c_clients[] __initconst = {
{
/* I2C attached embedded controller, used to access fuel-gauge */
.board_info = {
.type = "vexia_atla10_ec",
.addr = 0x76,
.dev_name = "ulpmc",
- .swnode = &vexia_edu_atla10_ulpmc_node,
+ .swnode = &vexia_edu_atla10_9v_ulpmc_node,
},
.adapter_path = "0000:00:18.1",
}, {
@@ -679,7 +739,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "kxtj21009",
.addr = 0x0f,
.dev_name = "kxtj21009",
- .swnode = &vexia_edu_atla10_accel_node,
+ .swnode = &vexia_edu_atla10_9v_accel_node,
},
.adapter_path = "0000:00:18.5",
}, {
@@ -688,7 +748,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "hid-over-i2c",
.addr = 0x38,
.dev_name = "FTSC1000",
- .swnode = &vexia_edu_atla10_touchscreen_node,
+ .swnode = &vexia_edu_atla10_9v_touchscreen_node,
},
.adapter_path = "0000:00:18.6",
.irq_data = {
@@ -703,7 +763,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
.type = "intel_soc_pmic_crc",
.addr = 0x6e,
.dev_name = "intel_soc_pmic_crc",
- .swnode = &vexia_edu_atla10_pmic_node,
+ .swnode = &vexia_edu_atla10_9v_pmic_node,
},
.adapter_path = "0000:00:18.7",
.irq_data = {
@@ -715,7 +775,7 @@ static const struct x86_i2c_client_info vexia_edu_atla10_i2c_clients[] __initcon
}
};
-static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
+static const struct x86_serdev_info vexia_edu_atla10_9v_serdevs[] __initconst = {
{
.ctrl.pci.devfn = PCI_DEVFN(0x1e, 3),
.ctrl_devname = "serial0",
@@ -723,7 +783,7 @@ static const struct x86_serdev_info vexia_edu_atla10_serdevs[] __initconst = {
},
};
-static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
+static struct gpiod_lookup_table vexia_edu_atla10_9v_ft5416_gpios = {
.dev_id = "i2c-FTSC1000",
.table = {
GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_LOW),
@@ -731,12 +791,12 @@ static struct gpiod_lookup_table vexia_edu_atla10_ft5416_gpios = {
},
};
-static struct gpiod_lookup_table * const vexia_edu_atla10_gpios[] = {
- &vexia_edu_atla10_ft5416_gpios,
+static struct gpiod_lookup_table * const vexia_edu_atla10_9v_gpios[] = {
+ &vexia_edu_atla10_9v_ft5416_gpios,
NULL
};
-static int __init vexia_edu_atla10_init(struct device *dev)
+static int __init vexia_edu_atla10_9v_init(struct device *dev)
{
struct pci_dev *pdev;
int ret;
@@ -760,13 +820,13 @@ static int __init vexia_edu_atla10_init(struct device *dev)
return 0;
}
-const struct x86_dev_info vexia_edu_atla10_info __initconst = {
- .i2c_client_info = vexia_edu_atla10_i2c_clients,
- .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_i2c_clients),
- .serdev_info = vexia_edu_atla10_serdevs,
- .serdev_count = ARRAY_SIZE(vexia_edu_atla10_serdevs),
- .gpiod_lookup_tables = vexia_edu_atla10_gpios,
- .init = vexia_edu_atla10_init,
+const struct x86_dev_info vexia_edu_atla10_9v_info __initconst = {
+ .i2c_client_info = vexia_edu_atla10_9v_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_9v_i2c_clients),
+ .serdev_info = vexia_edu_atla10_9v_serdevs,
+ .serdev_count = ARRAY_SIZE(vexia_edu_atla10_9v_serdevs),
+ .gpiod_lookup_tables = vexia_edu_atla10_9v_gpios,
+ .init = vexia_edu_atla10_9v_init,
.use_pci = true,
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 63a38a0069ba..dcf8d49e3b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -127,7 +127,8 @@ extern const struct x86_dev_info nextbook_ares8_info;
extern const struct x86_dev_info nextbook_ares8a_info;
extern const struct x86_dev_info peaq_c1010_info;
extern const struct x86_dev_info whitelabel_tm800a550l_info;
-extern const struct x86_dev_info vexia_edu_atla10_info;
+extern const struct x86_dev_info vexia_edu_atla10_5v_info;
+extern const struct x86_dev_info vexia_edu_atla10_9v_info;
extern const struct x86_dev_info xiaomi_mipad2_info;
extern const struct dmi_system_id x86_android_tablet_ids[];
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 6c94137865c9..949445e92973 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -3091,7 +3091,7 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
/* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (index >= num_domains)
+ if (num_domains < 0 || index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
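
The added num_domains < 0 test guards a signed/unsigned trap: of_count_phandle_with_args() can return a negative errno, and when that is compared against the unsigned index it is converted to a huge unsigned value, so the old bounds check silently passed. Illustrated in plain C:

    #include <stdio.h>

    int main(void)
    {
        unsigned int index = 0;
        int num_domains = -2;   /* e.g. -ENOENT from the phandle count */

        /* Old test: -2 converts to ~4 billion unsigned, so the
         * bounds check silently passes. */
        printf("old rejects: %d\n", index >= num_domains);  /* 0 */

        /* New test: reject error returns before comparing. */
        printf("new rejects: %d\n",
               num_domains < 0 || index >= (unsigned int)num_domains); /* 1 */
        return 0;
    }
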
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 958d34d4821b..105fcaf13a34 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1361,7 +1361,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_LOCKDEP) &&
- of_property_read_bool(domain->dev->of_node, "power-domains"))
+ of_property_present(domain->dev->of_node, "power-domains"))
lockdep_set_subclass(&domain->genpd.mlock, 1);
ret = of_genpd_add_provider_simple(domain->dev->of_node,
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 66409cff2083..e001b5c25bed 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -338,11 +338,6 @@ static int __init rcar_gen4_sysc_pd_init(void)
struct rcar_gen4_sysc_pd *pd;
size_t n;
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index b99326917330..1e2948523789 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -396,11 +396,6 @@ static int __init rcar_sysc_pd_init(void)
struct rcar_sysc_pd *pd;
size_t n;
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
n = strlen(area->name) + 1;
pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd) {
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 3c3158f31a48..f4cf129a0b68 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -89,6 +89,8 @@
#define AXP717_BAT_CC_MIN_UA 0
#define AXP717_BAT_CC_MAX_UA 3008000
+#define AXP717_TS_PIN_DISABLE BIT(4)
+
struct axp20x_batt_ps;
struct axp_data {
@@ -117,6 +119,7 @@ struct axp20x_batt_ps {
/* Maximum constant charge current */
unsigned int max_ccc;
const struct axp_data *data;
+ bool ts_disable;
};
static int axp20x_battery_get_max_voltage(struct axp20x_batt_ps *axp20x_batt,
@@ -984,6 +987,24 @@ static void axp717_set_battery_info(struct platform_device *pdev,
int ccc = info->constant_charge_current_max_ua;
int val;
+ axp_batt->ts_disable = (device_property_read_bool(axp_batt->dev,
+ "x-powers,no-thermistor"));
+
+ /*
+ * Under rare conditions an incorrectly programmed efuse for
+ * the temp sensor on the PMIC may trigger a fault condition.
+ * Allow users to declare in the device tree that the TS pin
+ * is not used, to work around this problem. Note that this
+ * requires the battery to be correctly defined with a
+ * monitored battery node.
+ */
+ if (axp_batt->ts_disable) {
+ regmap_update_bits(axp_batt->regmap,
+ AXP717_TS_PIN_CFG,
+ AXP717_TS_PIN_DISABLE,
+ AXP717_TS_PIN_DISABLE);
+ }
+
if (vmin > 0 && axp717_set_voltage_min_design(axp_batt, vmin))
dev_err(&pdev->dev,
"couldn't set voltage_min_design\n");
diff --git a/drivers/pps/generators/pps_gen-dummy.c b/drivers/pps/generators/pps_gen-dummy.c
index b284c200cbe5..55de4aecf35e 100644
--- a/drivers/pps/generators/pps_gen-dummy.c
+++ b/drivers/pps/generators/pps_gen-dummy.c
@@ -61,7 +61,7 @@ static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
* The PPS info struct
*/
-static struct pps_gen_source_info pps_gen_dummy_info = {
+static const struct pps_gen_source_info pps_gen_dummy_info = {
.use_system_clock = true,
.get_time = pps_gen_dummy_get_time,
.enable = pps_gen_dummy_enable,
diff --git a/drivers/pps/generators/pps_gen.c b/drivers/pps/generators/pps_gen.c
index ca592f1736f4..5b8bb454913c 100644
--- a/drivers/pps/generators/pps_gen.c
+++ b/drivers/pps/generators/pps_gen.c
@@ -66,7 +66,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
if (ret)
return -EFAULT;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
@@ -76,7 +76,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
case PPS_GEN_USESYSTEMCLOCK:
dev_dbg(pps_gen->dev, "PPS_GEN_USESYSTEMCLOCK\n");
- ret = put_user(pps_gen->info.use_system_clock, uiuarg);
+ ret = put_user(pps_gen->info->use_system_clock, uiuarg);
if (ret)
return -EFAULT;
@@ -175,7 +175,7 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
devt = MKDEV(MAJOR(pps_gen_devt), pps_gen->id);
cdev_init(&pps_gen->cdev, &pps_gen_cdev_fops);
- pps_gen->cdev.owner = pps_gen->info.owner;
+ pps_gen->cdev.owner = pps_gen->info->owner;
err = cdev_add(&pps_gen->cdev, devt, 1);
if (err) {
@@ -183,8 +183,8 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
MAJOR(pps_gen_devt), pps_gen->id);
goto free_ida;
}
- pps_gen->dev = device_create(pps_gen_class, pps_gen->info.parent, devt,
- pps_gen, "pps-gen%d", pps_gen->id);
+ pps_gen->dev = device_create(pps_gen_class, pps_gen->info->parent, devt,
+ pps_gen, "pps-gen%d", pps_gen->id);
if (IS_ERR(pps_gen->dev)) {
err = PTR_ERR(pps_gen->dev);
goto del_cdev;
@@ -225,7 +225,7 @@ static void pps_gen_unregister_cdev(struct pps_gen_device *pps_gen)
* Return: the PPS generator device in case of success, and ERR_PTR(errno)
* otherwise.
*/
-struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
+struct pps_gen_device *pps_gen_register_source(const struct pps_gen_source_info *info)
{
struct pps_gen_device *pps_gen;
int err;
@@ -235,7 +235,7 @@ struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
err = -ENOMEM;
goto pps_gen_register_source_exit;
}
- pps_gen->info = *info;
+ pps_gen->info = info;
pps_gen->enabled = false;
init_waitqueue_head(&pps_gen->queue);
diff --git a/drivers/pps/generators/sysfs.c b/drivers/pps/generators/sysfs.c
index faf8b1c6d202..6d6bc0006fea 100644
--- a/drivers/pps/generators/sysfs.c
+++ b/drivers/pps/generators/sysfs.c
@@ -19,7 +19,7 @@ static ssize_t system_show(struct device *dev, struct device_attribute *attr,
{
struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%d\n", pps_gen->info.use_system_clock);
+ return sysfs_emit(buf, "%d\n", pps_gen->info->use_system_clock);
}
static DEVICE_ATTR_RO(system);
@@ -30,7 +30,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
struct timespec64 time;
int ret;
- ret = pps_gen->info.get_time(pps_gen, &time);
+ ret = pps_gen->info->get_time(pps_gen, &time);
if (ret)
return ret;
@@ -49,7 +49,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 4b7344e1816e..e4be8b929196 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -315,6 +315,8 @@ struct ptp_ocp_serial_port {
#define OCP_BOARD_ID_LEN 13
#define OCP_SERIAL_LEN 6
#define OCP_SMA_NUM 4
+#define OCP_SIGNAL_NUM 4
+#define OCP_FREQ_NUM 4
enum {
PORT_GNSS,
@@ -342,8 +344,8 @@ struct ptp_ocp {
struct dcf_master_reg __iomem *dcf_out;
struct dcf_slave_reg __iomem *dcf_in;
struct tod_reg __iomem *nmea_out;
- struct frequency_reg __iomem *freq_in[4];
- struct ptp_ocp_ext_src *signal_out[4];
+ struct frequency_reg __iomem *freq_in[OCP_FREQ_NUM];
+ struct ptp_ocp_ext_src *signal_out[OCP_SIGNAL_NUM];
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
@@ -378,10 +380,12 @@ struct ptp_ocp {
u32 utc_tai_offset;
u32 ts_window_adjust;
u64 fw_cap;
- struct ptp_ocp_signal signal[4];
+ struct ptp_ocp_signal signal[OCP_SIGNAL_NUM];
struct ptp_ocp_sma_connector sma[OCP_SMA_NUM];
const struct ocp_sma_op *sma_op;
struct dpll_device *dpll;
+ int signals_nr;
+ int freq_in_nr;
};
#define OCP_REQ_TIMESTAMP BIT(0)
@@ -2578,12 +2582,60 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static int
+ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val)
+{
+ u32 reg, mask, shift;
+ unsigned long flags;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2;
+ shift = sma_nr & 1 ? 0 : 16;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
+
+ iowrite32(reg, gpio);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return 0;
+}
+
+static int
+ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val)
+{
+ u32 reg, mask, shift;
+ unsigned long flags;
+ u32 __iomem *gpio;
+
+ gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1;
+ shift = sma_nr & 1 ? 0 : 16;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ reg = ioread32(gpio);
+ reg = (reg & mask) | (val << shift);
+
+ iowrite32(reg, gpio);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return 0;
+}
+
static const struct ocp_sma_op ocp_adva_sma_op = {
.tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
.init = ptp_ocp_sma_fb_init,
.get = ptp_ocp_sma_fb_get,
- .set_inputs = ptp_ocp_sma_fb_set_inputs,
- .set_output = ptp_ocp_sma_fb_set_output,
+ .set_inputs = ptp_ocp_sma_adva_set_inputs,
+ .set_output = ptp_ocp_sma_adva_set_output,
};
static int
@@ -2649,6 +2701,8 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->eeprom_map = fb_eeprom_map;
bp->fw_version = ioread32(&bp->image->version);
bp->sma_op = &ocp_fb_sma_op;
+ bp->signals_nr = 4;
+ bp->freq_in_nr = 4;
ptp_ocp_fb_set_version(bp);
@@ -2814,6 +2868,8 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->fw_version = ioread32(&bp->reg->version);
bp->fw_tag = 2;
bp->sma_op = &ocp_art_sma_op;
+ bp->signals_nr = 4;
+ bp->freq_in_nr = 4;
/* Enable MAC serial port during initialisation */
iowrite32(1, &bp->board_config->mro50_serial_activate);
@@ -2840,6 +2896,8 @@ ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
bp->flash_start = 0xA00000;
bp->eeprom_map = fb_eeprom_map;
bp->sma_op = &ocp_adva_sma_op;
+ bp->signals_nr = 2;
+ bp->freq_in_nr = 2;
version = ioread32(&bp->image->version);
/* if lower 16 bits are empty, this is the fw loader. */
@@ -3960,7 +4018,7 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
{
struct signal_reg __iomem *reg = bp->signal_out[nr]->mem;
struct ptp_ocp_signal *signal = &bp->signal[nr];
- char label[8];
+ char label[16];
bool on;
u32 val;
@@ -3986,7 +4044,7 @@ static void
_frequency_summary_show(struct seq_file *s, int nr,
struct frequency_reg __iomem *reg)
{
- char label[8];
+ char label[16];
bool on;
u32 val;
@@ -4130,11 +4188,11 @@ ptp_ocp_summary_show(struct seq_file *s, void *data)
}
if (bp->fw_cap & OCP_CAP_SIGNAL)
- for (i = 0; i < 4; i++)
+ for (i = 0; i < bp->signals_nr; i++)
_signal_summary_show(s, bp, i);
if (bp->fw_cap & OCP_CAP_FREQ)
- for (i = 0; i < 4; i++)
+ for (i = 0; i < bp->freq_in_nr; i++)
_frequency_summary_show(s, i, bp->freq_in[i]);
if (bp->irig_out) {
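
The new adva helpers pack two 16-bit SMA selector fields into each 32-bit GPIO register: odd connector numbers land in the low half (shift 0), even ones in the high half (shift 16), and the mask preserves the other field across the read-modify-write. A self-contained model of the field update:

    #include <assert.h>
    #include <stdint.h>

    /* Update one 16-bit SMA field in a 32-bit register, preserving the
     * other field (mirrors the read-modify-write in the adva helpers). */
    static uint32_t set_sma_field(uint32_t reg, int sma_nr, uint32_t val)
    {
        uint32_t shift = (sma_nr & 1) ? 0 : 16;
        uint32_t mask = 0xffffu << (16 - shift);

        return (reg & mask) | (val << shift);
    }

    int main(void)
    {
        assert(set_sma_field(0xaaaa5555, 1, 0x1234) == 0xaaaa1234); /* low */
        assert(set_sma_field(0xaaaa5555, 2, 0x1234) == 0x12345555); /* high */
        return 0;
    }
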
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ccd54c089bab..0e4df71c2cef 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -322,7 +322,7 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
const struct pwm_ops *ops = chip->ops;
char wfhw[WFHWSIZE];
struct pwm_waveform wf_rounded;
- int err;
+ int err, ret_tohw;
BUG_ON(WFHWSIZE < ops->sizeof_wfhw);
@@ -332,16 +332,16 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
if (!pwm_wf_valid(wf))
return -EINVAL;
- err = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
- if (err)
- return err;
+ ret_tohw = __pwm_round_waveform_tohw(chip, pwm, wf, &wfhw);
+ if (ret_tohw < 0)
+ return ret_tohw;
if ((IS_ENABLED(CONFIG_PWM_DEBUG) || exact) && wf->period_length_ns) {
err = __pwm_round_waveform_fromhw(chip, pwm, &wfhw, &wf_rounded);
if (err)
return err;
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !pwm_check_rounding(wf, &wf_rounded))
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_tohw == 0 && !pwm_check_rounding(wf, &wf_rounded))
dev_err(&chip->dev, "Wrong rounding: requested %llu/%llu [+%llu], result %llu/%llu [+%llu]\n",
wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns);
@@ -382,7 +382,8 @@ static int __pwm_set_waveform(struct pwm_device *pwm,
wf_rounded.duty_length_ns, wf_rounded.period_length_ns, wf_rounded.duty_offset_ns,
wf_set.duty_length_ns, wf_set.period_length_ns, wf_set.duty_offset_ns);
}
- return 0;
+
+ return ret_tohw;
}
/**
diff --git a/drivers/pwm/pwm-axi-pwmgen.c b/drivers/pwm/pwm-axi-pwmgen.c
index 4259a0db9ff4..4337c8f5acf0 100644
--- a/drivers/pwm/pwm-axi-pwmgen.c
+++ b/drivers/pwm/pwm-axi-pwmgen.c
@@ -75,6 +75,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
{
struct axi_pwmgen_waveform *wfhw = _wfhw;
struct axi_pwmgen_ddata *ddata = axi_pwmgen_ddata_from_chip(chip);
+ int ret = 0;
if (wf->period_length_ns == 0) {
*wfhw = (struct axi_pwmgen_waveform){
@@ -91,12 +92,15 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
if (wfhw->period_cnt == 0) {
/*
* The specified period is too short for the hardware.
- * Let's round .duty_cycle down to 0 to get a (somewhat)
- * valid result.
+ * So round up .period_cnt to 1 (i.e. the smallest
+ * possible period). With .duty_cycle and .duty_offset
+ * being less than or equal to .period, their rounded
+ * values must be 0.
*/
wfhw->period_cnt = 1;
wfhw->duty_cycle_cnt = 0;
wfhw->duty_offset_cnt = 0;
+ ret = 1;
} else {
wfhw->duty_cycle_cnt = min_t(u64,
mul_u64_u32_div(wf->duty_length_ns, ddata->clk_rate_hz, NSEC_PER_SEC),
@@ -111,7 +115,7 @@ static int axi_pwmgen_round_waveform_tohw(struct pwm_chip *chip,
pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
ddata->clk_rate_hz, wfhw->period_cnt, wfhw->duty_cycle_cnt, wfhw->duty_offset_cnt);
- return 0;
+ return ret;
}
static int axi_pwmgen_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
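
Together with the pwm/core.c change earlier in this series, round_waveform_tohw() now has three outcomes: a negative errno, 0 when the request is representable, and 1 when the period had to be rounded up to the hardware minimum; the core propagates the 1 and skips the PWM_DEBUG rounding complaint for it. A rough user-space sketch of the convention (clock arithmetic simplified, overflow ignored):

    #include <stdint.h>
    #include <stdio.h>

    /* <0: error; 0: waveform representable (possibly rounded down);
     * 1: period had to be rounded up to the hardware minimum. */
    static int round_tohw(uint64_t period_ns, uint64_t clk_hz, uint32_t *cnt)
    {
        *cnt = (uint32_t)(period_ns * clk_hz / 1000000000ULL);
        if (*cnt == 0) {
            *cnt = 1;       /* shortest period the hardware can do */
            return 1;       /* tell the core we rounded up */
        }
        return 0;
    }

    int main(void)
    {
        uint32_t cnt;
        int ret = round_tohw(3, 125000000, &cnt);   /* 3 ns @ 125 MHz */

        printf("ret=%d cnt=%u\n", ret, cnt);        /* ret=1 cnt=1 */
        return 0;
    }
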
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 40f7dba42b5a..404cbe32711e 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#define AD5398_CURRENT_EN_MASK 0x8000
@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client)
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
- if (!init_data)
- return -EINVAL;
-
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
config.dev = &client->dev;
+ if (client->dev.of_node)
+ init_data = of_get_regulator_init_data(&client->dev,
+ client->dev.of_node,
+ &ad5398_reg);
+ if (!init_data)
+ return -EINVAL;
+
config.init_data = init_data;
+ config.of_node = client->dev.of_node;
config.driver_data = chip;
chip->client = client;
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index 59eb23d467ec..198d45f8e884 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -132,7 +132,7 @@ static int max20086_regulators_register(struct max20086 *chip)
static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
{
- struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { };
+ struct of_regulator_match *matches;
struct device_node *node;
unsigned int i;
int ret;
@@ -143,6 +143,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
return -ENODEV;
}
+ matches = devm_kcalloc(chip->dev, chip->info->num_outputs,
+ sizeof(*matches), GFP_KERNEL);
+ if (!matches)
+ return -ENOMEM;
+
for (i = 0; i < chip->info->num_outputs; ++i)
matches[i].name = max20086_output_names[i];
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 7d82bd1b36df..1e8142479656 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -270,8 +270,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
{
- int rid = rdev_get_id(rdev);
- int ctr_bit, reg;
+ unsigned int rid = rdev_get_id(rdev);
+ unsigned int ctr_bit, reg;
reg = RK806_POWER_FPWM_EN0 + rid / 8;
ctr_bit = rid % 8;
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 5b5664603eed..2c7e519a2254 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
+ { "vddmx", 950000, 1150000, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
+ { "vddmx", 1287500, 1287500, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
const char * const pd_names[WCNSS_MAX_PDS])
{
+ struct device *dev = wcnss->dev;
int i, ret;
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ wcnss->pds[0] = dev;
+ wcnss->num_pds = 1;
+ pm_runtime_enable(dev);
+ return 0;
+ }
+
for (i = 0; i < WCNSS_MAX_PDS; i++) {
if (!pd_names[i])
break;
@@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
+ struct device *dev = wcnss->dev;
int i;
+ /* Handle single power domain */
+ if (wcnss->num_pds == 1 && dev->pm_domain) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
for (i = 0; i < wcnss->num_pds; i++)
dev_pm_domain_detach(wcnss->pds[i], false);
}
@@ -437,10 +453,14 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
* the regulators for the power domains. For old device trees we need to
* reserve extra space to manage them through the regulator interface.
*/
- if (wcnss->num_pds)
- info += num_pd_vregs;
- else
+ if (wcnss->num_pds) {
+ info += wcnss->num_pds;
+ /* Handle single power domain case */
+ if (wcnss->num_pds < num_pd_vregs)
+ num_vregs += num_pd_vregs - wcnss->num_pds;
+ } else {
num_vregs += num_pd_vregs;
+ }
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e31fa0ad127e..a0afdeaac270 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -327,7 +327,7 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
*
* Otherwise the offset seconds should be 0.
*/
- if (rtc->start_secs > rtc->range_max ||
+ if ((rtc->start_secs >= 0 && rtc->start_secs > rtc->range_max) ||
rtc->start_secs + range_secs - 1 < rtc->range_min)
rtc->offset_secs = rtc->start_secs - rtc->range_min;
else if (rtc->start_secs > rtc->range_min)
diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c
index fe361652727a..13b5b1f20465 100644
--- a/drivers/rtc/lib.c
+++ b/drivers/rtc/lib.c
@@ -46,24 +46,38 @@ EXPORT_SYMBOL(rtc_year_days);
* rtc_time64_to_tm - converts time64_t to rtc_time.
*
* @time: The number of seconds since 01-01-1970 00:00:00.
- * (Must be positive.)
+ * Works for values since at least 1900
* @tm: Pointer to the struct rtc_time.
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int secs;
- int days;
+ int days, secs;
u64 u64tmp;
u32 u32tmp, udays, century, day_of_century, year_of_century, year,
day_of_year, month, day;
bool is_Jan_or_Feb, is_leap_year;
- /* time must be positive */
+ /*
+ * Get days and seconds while preserving the sign to
+ * handle negative time values (dates before 1970-01-01)
+ */
days = div_s64_rem(time, 86400, &secs);
+ /*
+ * We need 0 <= secs < 86400, which isn't given for negative
+ * values of time. Fix up accordingly.
+ */
+ if (secs < 0) {
+ days -= 1;
+ secs += 86400;
+ }
+
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
+ /* Ensure tm_wday is always positive */
+ if (tm->tm_wday < 0)
+ tm->tm_wday += 7;
/*
* The following algorithm is, basically, Proposition 6.3 of Neri
@@ -93,7 +107,7 @@ void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
* thus, is slightly different from [1].
*/
- udays = ((u32) days) + 719468;
+ udays = days + 719468;
u32tmp = 4 * udays + 3;
century = u32tmp / 146097;
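
The fixup above works because C division truncates toward zero: a negative timestamp first splits into a non-positive day count with negative leftover seconds, and borrowing one day restores 0 <= secs < 86400. Worked example for one hour before the epoch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t time = -3600;           /* 1969-12-31 23:00:00 UTC */
        int64_t days = time / 86400;    /* truncates toward zero: 0 */
        int secs = (int)(time % 86400); /* -3600 */

        /* Same fixup as above: force 0 <= secs < 86400. */
        if (secs < 0) {
            days -= 1;
            secs += 86400;
        }
        printf("days=%lld secs=%d\n", (long long)days, secs); /* -1 82800 */
        return 0;
    }
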
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 872e0b679be4..5efbe69bf5ca 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1807,10 +1807,8 @@ static int ds1307_probe(struct i2c_client *client)
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
- if (want_irq || ds1307_can_wakeup_device) {
+ if (want_irq || ds1307_can_wakeup_device)
regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit;
- regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
- }
regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
regs[0]);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 905986c61655..73848f764559 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -35,6 +35,7 @@
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
+#define PCF85063_REG_CTRL1_SWR 0x58
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_CTRL2_AF BIT(6)
@@ -589,7 +590,7 @@ static int pcf85063_probe(struct i2c_client *client)
i2c_set_clientdata(client, pcf85063);
- err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
if (err) {
dev_err(&client->dev, "RTC chip is not present\n");
return err;
@@ -599,6 +600,22 @@ static int pcf85063_probe(struct i2c_client *client)
if (IS_ERR(pcf85063->rtc))
return PTR_ERR(pcf85063->rtc);
+ /*
+ * If a Power loss is detected, SW reset the device.
+ * From PCF85063A datasheet:
+ * There is a low probability that some devices will have corruption
+ * of the registers after the automatic power-on reset...
+ */
+ if (tmp & PCF85063_REG_SC_OS) {
+ dev_warn(&client->dev,
+ "POR issue detected, sending a SW reset\n");
+ err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_SWR);
+ if (err < 0)
+ dev_warn(&client->dev,
+ "SW reset failed, trying to continue\n");
+ }
+
err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
config->force_cap_7000 ? 7000 : 0);
if (err < 0)
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 35b2e36b426a..cb01038a2e27 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -69,7 +69,7 @@
#define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5)
#define RV3032_CLKOUT2_OS BIT(7)
-#define RV3032_CTRL1_EERD BIT(3)
+#define RV3032_CTRL1_EERD BIT(2)
#define RV3032_CTRL1_WADA BIT(5)
#define RV3032_CTRL2_STOP BIT(0)
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index e5d947c763ea..6a030ba38bf3 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -264,6 +264,19 @@ static struct console sclp_console =
};
/*
+ * Release allocated pages.
+ */
+static void __init __sclp_console_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_con_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
+/*
* called by console_init() in drivers/char/tty_io.c at boot-time.
*/
static int __init
@@ -282,6 +295,10 @@ sclp_console_init(void)
/* Allocate pages for output buffering */
for (i = 0; i < sclp_console_pages; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page) {
+ __sclp_console_free_pages();
+ return -ENOMEM;
+ }
list_add_tail(page, &sclp_con_pages);
}
sclp_conbuf = NULL;
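
The new __sclp_console_free_pages() needs the safe variant of the list walk because each element is freed mid-iteration, so the cursor for the next element has to be saved before the current one disappears. The same idea on a plain singly linked list:

    #include <stdlib.h>

    struct node { struct node *next; };

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 4; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                break;
            n->next = head;
            head = n;
        }

        /* Save the successor before freeing the current node, exactly
         * what list_for_each_safe()'s extra cursor is for. */
        for (struct node *p = head, *next; p; p = next) {
            next = p->next;
            free(p);
        }
        return 0;
    }
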
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 892c18d2f87e..d3edacb6ee14 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = {
.flush_buffer = sclp_tty_flush_buffer,
};
+/* Release allocated pages. */
+static void __init __sclp_tty_free_pages(void)
+{
+ struct list_head *page, *p;
+
+ list_for_each_safe(page, p, &sclp_tty_pages) {
+ list_del(page);
+ free_page((unsigned long)page);
+ }
+}
+
static int __init
sclp_tty_init(void)
{
@@ -516,6 +527,7 @@ sclp_tty_init(void)
for (i = 0; i < MAX_KMEM_PAGES; i++) {
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (page == NULL) {
+ __sclp_tty_free_pages();
tty_driver_kref_put(driver);
return -ENOMEM;
}
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index a52c2690933f..a6a062897955 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -863,48 +863,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
vfio_put_device(&matrix_mdev->vdev);
}
-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
- "already assigned to %s"
+#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
- unsigned long *apm,
- unsigned long *aqm)
+#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"
+
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
+ struct ap_matrix_mdev *assigned_to,
+ unsigned long *apm, unsigned long *aqm)
{
unsigned long apid, apqi;
- const struct device *dev = mdev_dev(matrix_mdev->mdev);
- const char *mdev_name = dev_name(dev);
- for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
+ apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
+ }
+ }
+}
+
+static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
+ unsigned long *apm, unsigned long *aqm)
+{
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
- dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
+ dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
+ }
}
/**
* vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
+ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
+ * assigned, or NULL if this function was called by the AP bus
+ * driver in_use callback to verify none of the APQNs being reserved
+ * for the host device driver are in use by a vfio_ap mediated device
* @mdev_apm: mask indicating the APIDs of the APQNs to be verified
* @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that each APQN derived from the Cartesian product of a bitmap of
- * AP adapter IDs and AP queue indexes is not configured for any matrix
- * mediated device. AP queue sharing is not allowed.
+ * Verifies that each APQN derived from the Cartesian product of APIDs
+ * represented by the bits set in @mdev_apm and the APQIs of the bits set in
+ * @mdev_aqm is not assigned to a mediated device other than the mdev to which
+ * the APQN is being assigned (@assignee). AP queue sharing is not allowed.
*
* Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
+ unsigned long *mdev_apm,
unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *matrix_mdev;
+ struct ap_matrix_mdev *assigned_to;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
/*
- * If the input apm and aqm are fields of the matrix_mdev
- * object, then move on to the next matrix_mdev.
+ * If the mdev to which mdev_apm and mdev_aqm are being
+ * assigned is the same as the mdev being verified, skip it.
*/
- if (mdev_apm == matrix_mdev->matrix.apm &&
- mdev_aqm == matrix_mdev->matrix.aqm)
+ if (assignee == assigned_to)
continue;
memset(apm, 0, sizeof(apm));
@@ -914,15 +932,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
- AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
continue;
- if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
- AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
continue;
- vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+ if (assignee)
+ vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
+ else
+ vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
return -EADDRINUSE;
}
@@ -951,7 +970,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
matrix_mdev->matrix.aqm))
return -EADDRNOTAVAIL;
- return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
+ matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm);
}
@@ -2458,7 +2478,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
mutex_lock(&matrix_dev->guests_lock);
mutex_lock(&matrix_dev->mdevs_lock);
- ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);
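
/*
 * Editorial sketch (not part of the patch): the sharing check above boils
 * down to "two masks conflict only if both their APID sets and their APQI
 * sets intersect", since an APQN is the (APID, APQI) pair. A minimal
 * userspace model with 64-bit masks; all names here are hypothetical:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool apqns_shared(uint64_t apm_a, uint64_t aqm_a,
			 uint64_t apm_b, uint64_t aqm_b)
{
	/* Cartesian products intersect iff both component sets intersect */
	return (apm_a & apm_b) && (aqm_a & aqm_b);
}

int main(void)
{
	/* adapters {0,1} x domains {5} vs. adapters {1} x domains {6} */
	printf("%d\n", apqns_shared(0x3, 1 << 5, 0x2, 1 << 6)); /* 0 */
	/* same adapters, overlapping domain 5 -> shared APQN (1,5) */
	printf("%d\n", apqns_shared(0x3, 1 << 5, 0x2, 1 << 5)); /* 1 */
	return 0;
}
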
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index abf6a82b74af..0be719f38377 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3221,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
break;
}
fallthrough;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index e50a3dbf9de3..ef21b85cf014 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command)
case CHANGE_DEFINITION: case COMPARE: case COPY:
case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
- case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6:
case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
case WRITE_6: case WRITE_10: case WRITE_VERIFY:
case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3596414d970b..7a484ad0f9ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -935,8 +935,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *port_dev;
int phy_no = sas_phy->id;
+ if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
+ sas_port && port && (port->id != phy->port_id)) {
+ dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
+ phy_no, port->id, phy->port_id);
+ port_dev = sas_port->port_dev;
+ if (port_dev && !dev_is_expander(port_dev->dev_type)) {
+ /*
+ * Set the device state to gone to block
+ * sending IO to the device.
+ */
+ set_bit(SAS_DEV_GONE, &port_dev->state);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+ return;
+ }
+ }
+
phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index cce6c6b409ad..94adb6ac02a4 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
break;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
scb->scsi_cmd->result = DID_OK << 16;
break;
@@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
case WRITE_6:
case READ_10:
case WRITE_10:
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
break;
case MODE_SENSE:
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1d7db49a8fe4..318dc83e9a2a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -9569,18 +9569,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->vport != vport)
+ continue;
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2243 iotag = 0x%x cmd_flag = 0x%x "
- "ulp_command = 0x%x this_vport %x "
- "sli_flag = 0x%x\n",
+ "ulp_command = 0x%x sli_flag = 0x%x\n",
piocb->iotag, piocb->cmd_flag,
get_job_cmnd(phba, piocb),
- (piocb->vport == vport),
phba->sli.sli_flag);
- if (piocb->vport != vport)
- continue;
-
if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 36e66df36a18..07cd611f34bd 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -228,10 +228,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- /* check for recovered fabric node */
- if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- ndlp->nlp_DID == Fabric_DID)
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
return;
+ }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -5564,6 +5570,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5578,14 +5585,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
- return ndlp;
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..411a6b927c5b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -13170,6 +13170,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
eqhdl = lpfc_get_eq_hdl(0);
rc = pci_irq_vector(phba->pcidev, 0);
if (rc < 0) {
+ free_irq(phba->pcidev->irq, phba);
pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13250,6 +13251,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
eqhdl = lpfc_get_eq_hdl(0);
retval = pci_irq_vector(phba->pcidev, 0);
if (retval < 0) {
+ free_irq(phba->pcidev->irq, phba);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0502 INTR pci_irq_vec failed (%d)\n",
retval);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index adab151663dd..2006094af418 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
@@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
- scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
@@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* failed or the input parameter is invalid
*/
if( status == 1 &&
- (cmd->cmnd[0] == RESERVE ||
- cmd->cmnd[0] == RELEASE) ) {
+ (cmd->cmnd[0] == RESERVE_6 ||
+ cmd->cmnd[0] == RELEASE_6) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 60cc3372991f..3ba837b3093f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
*/
@@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
scb->dev_channel = 0xFF;
scb->dev_target = target;
ccb->raw_mbox[0] = CLUSTER_CMD;
- ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ?
RESERVE_LD : RELEASE_LD;
ccb->raw_mbox[3] = target;
@@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp)
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
- if (status == 1 && (scp->cmnd[0] == RESERVE ||
- scp->cmnd[0] == RELEASE)) {
+ if (status == 1 && (scp->cmnd[0] == RESERVE_6 ||
+ scp->cmnd[0] == RELEASE_6)) {
scp->result = DID_ERROR << 16 |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index ec5b1ab28717..604f37e5c0c3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
char *desc = NULL;
u16 event;
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
+ return;
+
event = event_reply->event;
switch (event) {
@@ -563,7 +566,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
- atomic_dec(&op_reply_q->pend_ios);
+
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -2744,7 +2747,10 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
+ (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
+
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 87784c96249a..47faa27bc355 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -679,6 +679,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ int tm_ret;
issue_reset = 0;
@@ -1120,18 +1121,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(
pcie_device->device_info))))
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
+
+ if (tm_ret != SUCCESS) {
+ ioc_info(ioc,
+ "target reset failed, issue hard reset: handle (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 183ce00aa671..f7067878b34f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -766,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+ pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..9d2db5bc8ee7 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -695,26 +695,23 @@ void scsi_cdl_check(struct scsi_device *sdev)
*/
int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
{
- struct scsi_mode_data data;
- struct scsi_sense_hdr sshdr;
- struct scsi_vpd *vpd;
- bool is_ata = false;
char buf[64];
+ bool is_ata;
int ret;
if (!sdev->cdl_supported)
return -EOPNOTSUPP;
rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (vpd)
- is_ata = true;
+ is_ata = rcu_dereference(sdev->vpd_pg89);
rcu_read_unlock();
/*
* For ATA devices, CDL needs to be enabled with a SET FEATURES command.
*/
if (is_ata) {
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
char *buf_data;
int len;
@@ -723,16 +720,30 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
if (ret)
return -EINVAL;
- /* Enable CDL using the ATA feature page */
+ /* Enable or disable CDL using the ATA feature page */
len = min_t(size_t, sizeof(buf),
data.length - data.header_length -
data.block_descriptor_length);
buf_data = buf + data.header_length +
data.block_descriptor_length;
- if (enable)
- buf_data[4] = 0x02;
- else
- buf_data[4] = 0;
+
+ /*
+ * If we want to enable CDL and CDL is already enabled on the
+ * device, do nothing. This avoids needlessly resetting the CDL
+ * statistics on the device as that is implied by the CDL enable
+ * action. Similar to this, there is no need to do anything if
+ * we want to disable CDL and CDL is already disabled.
+ */
+ if (enable) {
+ if ((buf_data[4] & 0x03) == 0x02)
+ goto out;
+ buf_data[4] &= ~0x03;
+ buf_data[4] |= 0x02;
+ } else {
+ if ((buf_data[4] & 0x03) == 0x00)
+ goto out;
+ buf_data[4] &= ~0x03;
+ }
ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
&data, &sshdr);
@@ -744,6 +755,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
}
}
+out:
sdev->cdl_enable = enable;
return 0;
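
/*
 * Editorial sketch (not part of the patch): the CDL logic above only touches
 * bits 1:0 of byte 4 of the ATA feature mode page and skips the MODE SELECT
 * entirely when the desired state is already set (0b10 = enabled, 0b00 =
 * disabled). Standalone model of that decision:
 */
#include <stdbool.h>
#include <stdint.h>

/* returns true if a MODE SELECT is needed, updating the byte in place */
static bool cdl_set(uint8_t *byte4, bool enable)
{
	uint8_t cur = *byte4 & 0x03;

	if (enable) {
		if (cur == 0x02)
			return false;	/* already enabled, keep statistics */
		*byte4 = (*byte4 & ~0x03) | 0x02;
	} else {
		if (cur == 0x00)
			return false;	/* already disabled */
		*byte4 &= ~0x03;
	}
	return true;
}
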
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5ceaa4665e5d..4da0c259390b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -173,6 +173,10 @@ static const char *sdebug_version_date = "20210520";
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */
+#define TAPE_DEF_DENSITY 0x0
+#define TAPE_DEF_BLKSIZE 0
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -363,6 +367,10 @@ struct sdebug_dev_info {
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+ /* For tapes */
+ unsigned int tape_blksize;
+ unsigned int tape_density;
+
struct dentry *debugfs_entry;
struct spinlock list_lock;
struct list_head inject_err_list;
@@ -773,7 +781,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 20 */
{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {0, 0x1, 0, 0, NULL, NULL, /* REWIND ?? */
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -2742,7 +2750,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
if (!arr)
@@ -2755,7 +2763,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
is_zbc = devip->zoned;
- if ((is_disk || is_zbc) && !dbd)
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2793,15 +2802,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
put_unaligned_be32(0xffffffff, ap + 0);
else
put_unaligned_be32(sdebug_capacity, ap + 0);
- put_unaligned_be16(sdebug_sector_size, ap + 6);
+ if (is_tape) {
+ ap[0] = devip->tape_density;
+ put_unaligned_be16(devip->tape_blksize, ap + 6);
+ } else
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
+ if (is_tape) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
+ return check_condition_result;
+ }
put_unaligned_be64((u64)sdebug_capacity, ap + 0);
put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
+ if (cmd[2] == 0)
+ goto only_bd; /* Only block descriptor requested */
/*
* N.B. If len>0 before resp_*_pg() call, then form of that call should be:
@@ -2902,6 +2921,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
default:
goto bad_pcode;
}
+only_bd:
if (msense_6)
arr[0] = offset - 1;
else
@@ -2945,8 +2965,27 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- off = bd_len + (mselect6 ? 4 : 8);
- if (md_len > 2 || off >= res) {
+ off = (mselect6 ? 4 : 8);
+ if (sdebug_ptype == TYPE_TAPE) {
+ int blksize;
+
+ if (bd_len != 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA,
+ mselect6 ? 3 : 6, -1);
+ return check_condition_result;
+ }
+ blksize = get_unaligned_be16(arr + off + 6);
+ if ((blksize % 4) != 0) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, off + 6, -1);
+ return check_condition_result;
+ }
+ devip->tape_density = arr[off];
+ devip->tape_blksize = blksize;
+ }
+ off += bd_len;
+ if (off >= res)
+ return 0; /* No page written, just descriptors */
+ if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
@@ -5835,6 +5874,10 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zoned = false;
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ }
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
spin_lock_init(&devip->list_lock);
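
/*
 * Editorial sketch (not part of the patch): round-trip of the tape fields
 * the hunks above put into / take out of the 8-byte short block descriptor,
 * exactly as resp_mode_sense()/resp_mode_select() use them -- density code
 * in byte 0 and a 16-bit big-endian block size at offset 6, which MODE
 * SELECT only accepts when it is a multiple of 4:
 */
#include <stdint.h>

static void tape_bd_fill(uint8_t bd[8], uint8_t density, uint16_t blksize)
{
	bd[0] = density;
	bd[6] = blksize >> 8;
	bd[7] = blksize & 0xff;
}

/* returns 0 on success, -1 for the (blksize % 4) CHECK CONDITION case */
static int tape_bd_parse(const uint8_t bd[8], uint8_t *density,
			 uint16_t *blksize)
{
	uint16_t bs = (bd[6] << 8) | bd[7];

	if (bs % 4)
		return -1;
	*density = bd[0];
	*blksize = bs;
	return 0;
}
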
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1cfe0bb89b2..7a31dae9aa82 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1253,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
*/
static void scsi_cleanup_rq(struct request *rq)
{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->flags = 0;
+
if (rq->rq_flags & RQF_DONTPREP) {
- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ scsi_mq_uninit_cmd(cmd);
rq->rq_flags &= ~RQF_DONTPREP;
}
}
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index be4aef0f4f99..055a03a83ad6 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -17,7 +17,9 @@ static const struct ctl_table scsi_table[] = {
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
- .proc_handler = proc_dointvec },
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX },
};
static struct ctl_table_header *scsi_table_header;
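
/*
 * Editorial sketch (not part of the patch): the same minmax pattern for any
 * int sysctl that must never go negative -- extra1/extra2 make the write
 * handler reject out-of-range values instead of storing them. Names here
 * are hypothetical.
 */
static int example_level;

static const struct ctl_table example_table[] = {
	{
		.procname	= "example_level",
		.data		= &example_level,
		.maxlen		= sizeof(example_level),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,		/* reject < 0 */
		.extra2		= SYSCTL_INT_MAX,	/* reject overflow */
	},
};
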
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 7a447ff600d2..a8db66428f80 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -169,6 +169,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
unsigned int nr_zones, size_t *buflen)
{
struct request_queue *q = sdkp->disk->queue;
+ unsigned int max_segments;
size_t bufsize;
void *buf;
@@ -180,12 +181,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
+ * Since max segments can be larger than the max inline bio vectors,
+ * further limit the allocated buffer to BIO_MAX_INLINE_VECS.
*/
nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
- bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
+ max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
+ bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);
while (bufsize >= SECTOR_SIZE) {
buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
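
/*
 * Editorial sketch (not part of the patch): the report-zones buffer is
 * clamped three ways -- by the zone count, by the HBA's max transfer size,
 * and now by min(queue_max_segments, BIO_MAX_INLINE_VECS) pages so the
 * inline bio_vec array can always map it. Userspace model of the
 * arithmetic; the limit values below are made up for illustration:
 */
#include <stddef.h>
#include <stdio.h>

#define SECTOR_SIZE	512
#define PAGE_BYTES	4096

static size_t report_bufsize(size_t nr_zones, size_t max_hw_bytes,
			     unsigned int max_segments,
			     unsigned int max_inline_vecs)
{
	/* roundup((nr_zones + 1) * 64, SECTOR_SIZE) */
	size_t bufsize = ((nr_zones + 1) * 64 + SECTOR_SIZE - 1) /
			 SECTOR_SIZE * SECTOR_SIZE;
	unsigned int segs = max_segments < max_inline_vecs ?
			    max_segments : max_inline_vecs;

	if (bufsize > max_hw_bytes)
		bufsize = max_hw_bytes;
	if (bufsize > (size_t)segs * PAGE_BYTES)
		bufsize = (size_t)segs * PAGE_BYTES;
	return bufsize;
}

int main(void)
{
	/* 100000 zones would want ~6.4 MB; 128 segments cap it at 512 KB */
	printf("%zu\n", report_bufsize(100000, 8 << 20, 128, 256));
	return 0;
}
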
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 344e4da336bb..7dec7958344e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -952,7 +952,6 @@ static void reset_state(struct scsi_tape *STp)
STp->partition = find_partition(STp);
if (STp->partition < 0)
STp->partition = 0;
- STp->new_partition = STp->partition;
}
}
@@ -2897,7 +2896,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n");
- fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
@@ -2930,14 +2928,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmd_in == MTSETDENSITY) {
(STp->buffer)->b_data[4] = arg;
STp->density_changed = 1; /* At least we tried ;-) */
+ STp->changed_density = arg;
} else if (cmd_in == SET_DENS_AND_BLK)
(STp->buffer)->b_data[4] = arg >> 24;
else
(STp->buffer)->b_data[4] = STp->density;
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
ltmp = arg & MT_ST_BLKSIZE_MASK;
- if (cmd_in == MTSETBLK)
+ if (cmd_in == MTSETBLK) {
STp->blksize_changed = 1; /* At least we tried ;-) */
+ STp->changed_blksize = arg;
+ }
} else
ltmp = STp->block_size;
(STp->buffer)->b_data[9] = (ltmp >> 16);
@@ -3084,7 +3085,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd_in == MTSETDRVBUFFER ||
cmd_in == SET_DENS_AND_BLK) {
if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
- !(STp->use_pf & PF_TESTED)) {
+ cmdstatp->sense_hdr.asc == 0x24 &&
+ (STp->device)->scsi_level <= SCSI_2 &&
+ !(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
@@ -3636,9 +3639,25 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EIO);
goto out;
}
- reset_state(STp);
+ reset_state(STp); /* Clears pos_unknown */
/* remove this when the midlevel properly clears was_reset */
STp->device->was_reset = 0;
+
+ /* Fix the device settings after reset, ignore errors */
+ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTEOM) {
+ if (STp->can_partitions) {
+ /* STp->new_partition contains the
+ * latest partition set
+ */
+ STp->partition = 0;
+ switch_partition(STp);
+ }
+ if (STp->density_changed)
+ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
+ if (STp->blksize_changed)
+ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
+ }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 1aaaf5369a40..6d31b894ee84 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -165,6 +165,7 @@ struct scsi_tape {
unsigned char compression_changed;
unsigned char drv_buffer;
unsigned char density;
+ unsigned char changed_density;
unsigned char door_locked;
unsigned char autorew_dev; /* auto-rewind device */
unsigned char rew_at_close; /* rewind necessary at close */
@@ -172,6 +173,7 @@ struct scsi_tape {
unsigned char cleaning_req; /* cleaning requested? */
unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
+ int changed_blksize;
int min_block;
int max_block;
int recover_count; /* From tape opening */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a8614e54544e..909b02460209 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1819,6 +1819,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
return SCSI_MLQUEUE_DEVICE_BUSY;
}
+ payload->rangecount = 1;
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 27c9fa745fd5..b8d5244678f0 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
+ struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index e6d940292c9f..45ccbe2cbcd6 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -66,8 +66,9 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT 1
-#define APPLE_RTKIT_OSLOG_ACK 3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -251,15 +252,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+ } else {
+ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ }
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
- buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
@@ -284,11 +291,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
- APPLE_RTKIT_BUFFER_REQUEST);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
- buffer->iova);
+ /* oslog uses different fields and shifts the IOVA instead of the size */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+ APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+ buffer->iova >> 12);
+ } else {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+ buffer->size >> 12);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ }
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
@@ -482,25 +499,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
- u64 ack;
-
- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
- case APPLE_RTKIT_OSLOG_INIT:
- apple_rtkit_oslog_rx_init(rtk, msg);
+ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+ APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+ msg);
}
}
@@ -667,7 +677,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox->rx = apple_rtkit_rx;
rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
@@ -710,6 +720,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -890,6 +901,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
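
/*
 * Editorial sketch (not part of the patch): why the oslog shifts "look
 * odd". Per the masks above, the oslog request packs a byte count in bits
 * 55:36 and a page-granular IOVA in bits 35:0, so only the IOVA needs the
 * <<12; the generic buffer request (masks not shown here) instead carries a
 * 4K-page count, so there the size field is what gets shifted. Standalone
 * decode of the oslog layout:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msg = ((uint64_t)1 << 56)		/* type = buffer request */
		     | ((uint64_t)0x8000 << 36)		/* size = 32768 bytes */
		     | 0x12345;				/* iova = page 0x12345 */
	uint64_t size = (msg >> 36) & 0xfffff;		/* GENMASK_ULL(55, 36) */
	uint64_t iova = (msg & 0xfffffffffULL) << 12;	/* GENMASK_ULL(35, 0) */

	printf("size=%llu iova=0x%llx\n",
	       (unsigned long long)size, (unsigned long long)iova);
	return 0;
}
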
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5250c1d702eb..aaa965d4b050 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -155,6 +155,7 @@
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DPI1 38
#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0
@@ -289,6 +290,7 @@
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8188_MUTEX_SOF_DP_INTF1 4
+#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -301,6 +303,7 @@
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
@@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI1] =
+ MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
[MUTEX_SOF_DP_INTF1] =
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 393d2d1d275f..79e04bff3e33 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -324,6 +325,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, if its DT node provides the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or, if the consumer points at an ICE DT
+ * node via the 'qcom,ice' phandle property, by returning the instance
+ * already created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by
+ * the consumer, or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
+
static int qcom_ice_probe(struct platform_device *pdev)
{
struct qcom_ice *engine;
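
/*
 * Editorial sketch (not part of the patch): typical consumer-side use of
 * the new devres helper from a probe routine. The host function and its
 * error handling are hypothetical; the NULL / ERR_PTR contract follows the
 * kernel-doc above.
 */
static int example_host_probe(struct platform_device *pdev)
{
	struct qcom_ice *ice;

	ice = devm_of_qcom_ice_get(&pdev->dev);
	if (IS_ERR(ice))
		return PTR_ERR(ice);	/* real error, abort probe */
	if (!ice)
		dev_info(&pdev->dev, "no ICE described, continuing without\n");

	/* no explicit put needed: devres drops the reference on unbind */
	return 0;
}
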
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 97006cc3b946..8e681f519526 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -9,6 +9,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
+#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/energy_model.h>
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 95294462ff21..99c5f9c80101 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index dd5256e5aae1..c40313886a01 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index 114352695ac2..5a93a68dba87 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 30f230ed1769..4bad12a99542 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 7a2d50be6b4a..2ae5c3e1b07a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 6fedcd78cb45..58a2209795f7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 4fb0f0a24828..704039eb3c07 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -105,6 +105,12 @@ err_unknown_variant:
return -ENODEV;
}
+static const struct regmap_config k3_chipinfo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
+ void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
- regmap = device_node_to_regmap(node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index 5a54b10daf77..9d8062378724 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -1139,6 +1139,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
amd_sdw_wake_enable(amd_manager, false);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ amd_sdw_wake_enable(amd_manager, false);
/*
* As per hardware programming sequence on AMD platforms,
* clock stop should be invoked first before powering-off
@@ -1166,6 +1167,7 @@ static int __maybe_unused amd_suspend_runtime(struct device *dev)
amd_sdw_wake_enable(amd_manager, true);
return amd_sdw_clock_stop(amd_manager);
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ amd_sdw_wake_enable(amd_manager, true);
ret = amd_sdw_clock_stop(amd_manager);
if (ret)
return ret;
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 9b295fc9acd5..df73e2c04090 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -121,6 +121,10 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
+ ret = sdw_irq_create(bus, fwnode);
+ if (ret)
+ return ret;
+
/*
* SDW is an enumerable bus, but devices can be powered off. So,
* they won't be able to report as present.
@@ -137,6 +141,7 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
if (ret < 0) {
dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
+ sdw_irq_delete(bus);
return ret;
}
@@ -155,10 +160,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
bus->params.curr_bank = SDW_BANK0;
bus->params.next_bank = SDW_BANK1;
- ret = sdw_irq_create(bus, fwnode);
- if (ret)
- return ret;
-
return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index f367670ea991..68be8ff3f02b 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1341,7 +1341,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
return val;
}
-static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
+static int cdns_init_clock_ctrl(struct sdw_cdns *cdns)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -1355,14 +1355,25 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
prop->default_row,
prop->default_col);
+ if (!prop->default_frame_rate || !prop->default_row) {
+ dev_err(cdns->dev, "Default frame_rate %d or row %d is invalid\n",
+ prop->default_frame_rate, prop->default_row);
+ return -EINVAL;
+ }
+
/* Set clock divider */
- divider = (prop->mclk_freq / prop->max_clk_freq) - 1;
+ divider = (prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
+ bus->params.curr_dr_freq) - 1;
cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0,
CDNS_MCP_CLK_MCLKD_MASK, divider);
cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1,
CDNS_MCP_CLK_MCLKD_MASK, divider);
+ /* Set frame shape based on the actual bus frequency. */
+ prop->default_col = bus->params.curr_dr_freq /
+ prop->default_frame_rate / prop->default_row;
+
/*
* Frame shape changes after initialization have to be done
* with the bank switch mechanism
@@ -1375,6 +1386,8 @@ static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
+
+ return 0;
}
/**
@@ -1408,9 +1421,12 @@ EXPORT_SYMBOL(sdw_cdns_soft_reset);
*/
int sdw_cdns_init(struct sdw_cdns *cdns)
{
+ int ret;
u32 val;
- cdns_init_clock_ctrl(cdns);
+ ret = cdns_init_clock_ctrl(cdns);
+ if (ret)
+ return ret;
sdw_cdns_check_self_clearing_bits(cdns, __func__, false, 0);
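
/*
 * Editorial sketch (not part of the patch): worked numbers for the divider
 * and frame-shape math above, as plain arithmetic. The example values
 * (24.576 MHz mclk, 12.288 MHz double-rate bus clock, 48 rows, 16 kHz
 * frames) are illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mclk = 24576000, dr_freq = 12288000;
	unsigned int frame_rate = 16000, rows = 48;
	unsigned int divider = mclk * 2 / dr_freq - 1;	/* SDW_DOUBLE_RATE_FACTOR == 2 */
	unsigned int cols = dr_freq / frame_rate / rows;

	/* divider=3, cols=16: a 48x16 frame at 16 kHz fills the bus exactly */
	printf("divider=%u cols=%u\n", divider, cols);
	return 0;
}
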
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 067c954cb6ea..863781ba6c16 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2013 Freescale Semiconductor, Inc.
-// Copyright 2020 NXP
+// Copyright 2020-2025 NXP
//
// Freescale DSPI driver
// This file contains a driver for the Freescale DSPI
@@ -62,6 +62,7 @@
#define SPI_SR_TFIWF BIT(18)
#define SPI_SR_RFDF BIT(17)
#define SPI_SR_CMDFFF BIT(16)
+#define SPI_SR_TXRXS BIT(30)
#define SPI_SR_CLEAR (SPI_SR_TCFQF | \
SPI_SR_TFUF | SPI_SR_TFFF | \
SPI_SR_CMDTCF | SPI_SR_SPEF | \
@@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *transfer;
bool cs = false;
int status = 0;
+ u32 val = 0;
+ bool cs_change = false;
message->actual_length = 0;
+ /* Put DSPI in running mode if halted. */
+ regmap_read(dspi->regmap, SPI_MCR, &val);
+ if (val & SPI_MCR_HALT) {
+ regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ !(val & SPI_SR_TXRXS))
+ ;
+ }
+
list_for_each_entry(transfer, &message->transfers, transfer_list) {
dspi->cur_transfer = transfer;
dspi->cur_msg = message;
@@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
+ cs_change = transfer->cs_change;
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
dspi->len = transfer->len;
@@ -962,6 +975,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
+
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
@@ -988,6 +1003,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi_deassert_cs(spi, &cs);
}
+ if (status || !cs_change) {
+ /* Put DSPI in stop mode */
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_HALT, SPI_MCR_HALT);
+ while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
+ val & SPI_SR_TXRXS)
+ ;
+ }
+
message->status = status;
spi_finalize_current_message(ctlr);
@@ -1167,6 +1191,20 @@ static int dspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+static const struct regmap_range dspi_yes_ranges[] = {
+ regmap_reg_range(SPI_MCR, SPI_MCR),
+ regmap_reg_range(SPI_TCR, SPI_CTAR(3)),
+ regmap_reg_range(SPI_SR, SPI_TXFR3),
+ regmap_reg_range(SPI_RXFR0, SPI_RXFR3),
+ regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)),
+ regmap_reg_range(SPI_SREX, SPI_SREX),
+};
+
+static const struct regmap_access_table dspi_access_table = {
+ .yes_ranges = dspi_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges),
+};
+
static const struct regmap_range dspi_volatile_ranges[] = {
regmap_reg_range(SPI_MCR, SPI_TCR),
regmap_reg_range(SPI_SR, SPI_SR),
@@ -1184,6 +1222,8 @@ static const struct regmap_config dspi_regmap_config = {
.reg_stride = 4,
.max_register = 0x88,
.volatile_table = &dspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
};
static const struct regmap_range dspi_xspi_volatile_ranges[] = {
@@ -1205,6 +1245,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
.reg_stride = 4,
.max_register = 0x13c,
.volatile_table = &dspi_xspi_volatile_table,
+ .rd_table = &dspi_access_table,
+ .wr_table = &dspi_access_table,
},
{
.name = "pushr",
@@ -1227,6 +1269,8 @@ static int dspi_init(struct fsl_dspi *dspi)
if (!spi_controller_is_target(dspi->ctlr))
mcr |= SPI_MCR_HOST;
+ mcr |= SPI_MCR_HALT;
+
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
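
/*
 * Editorial sketch (not part of the patch): the run/stop handshake the
 * hunks above add around each message, pulled out into two hypothetical
 * helpers. The module now idles halted and only runs while transferring;
 * SPI_SR_TXRXS reflects the actual run state, so each MCR_HALT change is
 * polled until the status bit follows.
 */
static void dspi_run(struct fsl_dspi *dspi)
{
	u32 sr;

	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0);
	/* wait for the module to report running (SPI_SR_TXRXS set) */
	while (regmap_read(dspi->regmap, SPI_SR, &sr) >= 0 &&
	       !(sr & SPI_SR_TXRXS))
		;
}

static void dspi_stop(struct fsl_dspi *dspi)
{
	u32 sr;

	regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
	/* wait for the module to report stopped (SPI_SR_TXRXS clear) */
	while (regmap_read(dspi->regmap, SPI_SR, &sr) >= 0 &&
	       (sr & SPI_SR_TXRXS))
		;
}
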
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..c43fb496da95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
+ int ret;
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
- spi_imx_setupxfer(spi, transfer);
+ ret = spi_imx_setupxfer(spi, transfer);
+ if (ret < 0)
+ return ret;
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 31a878d9458d..7740f94847a8 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -420,7 +420,7 @@ MODULE_LICENSE("GPL");
static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len)
{
/* limit the hex_dump */
- if (len < 1024) {
+ if (len <= 1024) {
print_hex_dump(KERN_INFO, pre,
DUMP_PREFIX_OFFSET, 16, 1,
ptr, len, 0);
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a9f0f47f4759..74b013c41601 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -585,7 +585,11 @@ u64 spi_mem_calc_op_duration(struct spi_mem_op *op)
ns_per_cycles = 1000000000 / op->max_freq;
ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1);
ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1);
- ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
+ /* Dummy bytes are optional for some SPI flash memory operations */
+ if (op->dummy.nbytes)
+ ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1);
+
ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1);
return ncycles * ns_per_cycles;
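
/*
 * Editorial sketch (not part of the patch): the duration formula above in
 * standalone form, including the new guard that a zero-length dummy phase
 * contributes no cycles (and never divides by its zero buswidth).
 */
#include <stdint.h>
#include <stdio.h>

struct phase { unsigned int nbytes, buswidth, dtr; };

static uint64_t op_duration_ns(const struct phase *p, int nphases,
			       unsigned int hz)
{
	uint64_t ncycles = 0;

	for (int i = 0; i < nphases; i++) {
		if (!p[i].nbytes)
			continue;	/* optional phase, e.g. dummy */
		ncycles += (uint64_t)p[i].nbytes * 8 / p[i].buswidth /
			   (p[i].dtr ? 2 : 1);
	}
	return ncycles * (1000000000ULL / hz);
}

int main(void)
{
	/* 1-byte cmd on 1 line, 3-byte addr on 1 line, no dummy, 16 data on 4 */
	struct phase op[] = { {1, 1, 0}, {3, 1, 0}, {0, 0, 0}, {16, 4, 0} };

	/* (8 + 24 + 0 + 32) cycles at 50 MHz = 64 * 20 ns = 1280 ns */
	printf("%llu ns\n", (unsigned long long)op_duration_ns(op, 4, 50000000));
	return 0;
}
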
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index c02c4204442f..0eb35c4e3987 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi);
-
- return 0;
+ return spi_setup(priv->spi);
}
static int spi_mux_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1bc012fce7cb..1a6381de6f33 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
- if (spi->mode & SPI_CS_HIGH)
+ if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0)))
cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index fcbe864c9b7d..4b070377e3d1 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host,
else
reg |= SUN4I_CTL_DHB;
+ /* Now that the settings are correct, enable the interface */
+ reg |= SUN4I_CTL_ENABLE;
+
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
/* Ensure that we have a parent clock fast enough */
@@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev)
}
sun4i_spi_write(sspi, SUN4I_CTL_REG,
- SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+ SUN4I_CTL_MASTER | SUN4I_CTL_TP);
return 0;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 3822d7c8d8ed..795a8482c2c7 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;
- if (setup->unit != SPI_DELAY_UNIT_SCK ||
- hold->unit != SPI_DELAY_UNIT_SCK ||
- inactive->unit != SPI_DELAY_UNIT_SCK) {
+ if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) ||
+ (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) ||
+ (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 08e49a876894..64e1b2f8a000 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
- ret);
+ if (WARN_ON_ONCE(ret == 0)) {
+ dev_err_ratelimited(tqspi->dev,
+ "QSPI Transfer failed with timeout\n");
if (tqspi->is_curr_dma_xfer &&
(tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index d800d79f62a7..12ab13edab54 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -799,7 +799,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
- irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -814,27 +813,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status);
}
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ if (!mask && !dma_status)
+ return IRQ_NONE;
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
zynqmp_process_dma_irq(xqspi);
- ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK))
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/**
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 7540c20090c7..351f983ef914 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -393,16 +393,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
if (!bytes_available) {
- dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
- reset_ip_core(fifo);
+ dev_err(fifo->dt_device, "received a packet of length 0\n");
ret = -EIO;
goto end_unlock;
}
if (bytes_available > len) {
- dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
+ dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
bytes_available, len);
- reset_ip_core(fifo);
ret = -EINVAL;
goto end_unlock;
}
@@ -411,8 +409,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
/* this probably can't happen unless IP
* registers were previously mishandled
*/
- dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
- reset_ip_core(fifo);
+ dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
ret = -EIO;
goto end_unlock;
}
@@ -433,7 +430,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
copy * sizeof(u32))) {
- reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -542,7 +538,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
copy * sizeof(u32))) {
- reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -775,9 +770,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo)
goto end;
}
- /* IP sets TDFV to fifo depth - 4 so we will do the same */
- fifo->tx_fifo_depth -= 4;
-
ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
if (ret) {
dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index c62407077d37..cd7fe7d814ce 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -66,10 +66,7 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
int j;
int count;
- if (num_fifo_bytes - i < agilent_82350b_fifo_size)
- block_size = num_fifo_bytes - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(num_fifo_bytes - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
writeb(ENABLE_TI_TO_SRAM | DIRECTION_GPIB_TO_HOST,
a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
@@ -200,10 +197,7 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
for (i = 1; i < fifotransferlength;) {
clear_bit(WRITE_READY_BN, &tms_priv->state);
- if (fifotransferlength - i < agilent_82350b_fifo_size)
- block_size = fifotransferlength - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(fifotransferlength - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
for (j = 0; j < block_size; ++j, ++i) {
// load data into board's sram
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 6c14d7bcdd67..081b17f49863 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
- if (strcmp(buf, "full")) {
+ if (strcmp(buf, "full") == 0) {
gpiod_set_value(chip->rdwr_pin, 1);
chip->mode = AD7816_FULL;
} else {
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index deec33f63bcf..e6724329356b 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1902,6 +1902,7 @@ static int bcm2835_mmal_probe(struct vchiq_device *device)
__func__, ret);
goto free_dev;
}
+ dev->v4l2_dev.dev = &device->dev;
/* setup v4l controls */
ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 0c7ea2d0ee85..64f9536f1232 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -280,29 +280,6 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return 0;
}
-int
-vchiq_platform_init_state(struct vchiq_state *state)
-{
- struct vchiq_arm_state *platform_state;
-
- platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
- if (!platform_state)
- return -ENOMEM;
-
- rwlock_init(&platform_state->susp_res_lock);
-
- init_completion(&platform_state->ka_evt);
- atomic_set(&platform_state->ka_use_count, 0);
- atomic_set(&platform_state->ka_use_ack_count, 0);
- atomic_set(&platform_state->ka_release_count, 0);
-
- platform_state->state = state;
-
- state->platform_state = (struct opaque_platform_state *)platform_state;
-
- return 0;
-}
-
static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
return (struct vchiq_arm_state *)state->platform_state;
@@ -1012,6 +989,39 @@ exit:
}
int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+ char threadname[16];
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state, threadname);
+ if (IS_ERR(platform_state->ka_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ return PTR_ERR(platform_state->ka_thread);
+ }
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
+int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@@ -1331,7 +1341,6 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate newstate)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- char threadname[16];
dev_dbg(state->dev, "suspend: %d: %s->%s\n",
state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
@@ -1346,17 +1355,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
arm_state->first_connect = 1;
write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
- (void *)state,
- threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- dev_err(state->dev, "suspend: Couldn't create thread %s\n",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
+ wake_up_process(arm_state->ka_thread);
}
static const struct of_device_id vchiq_of_match[] = {
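The hunks above move thread creation out of the connection-state callback: the keepalive thread is created once in vchiq_platform_init_state(), where a failure can still be returned to the caller, and is merely woken on first connect. A hedged sketch of that create-early, wake-later kthread pattern (names here are hypothetical):

#include <linux/kthread.h>
#include <linux/err.h>

struct worker_ctx {
	struct task_struct *thread;
};

/* Create the thread at init time so a failure surfaces early... */
static int worker_init(struct worker_ctx *ctx, int (*fn)(void *), void *data)
{
	ctx->thread = kthread_create(fn, data, "worker/%d", 0);
	return IS_ERR(ctx->thread) ? PTR_ERR(ctx->thread) : 0;
}

/* ...and start it later from a path that cannot fail. */
static void worker_start(struct worker_ctx *ctx)
{
	wake_up_process(ctx->thread);
}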
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 1244ef3aa86c..620ba6e0ab07 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4263,8 +4263,8 @@ int iscsit_close_connection(
spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
- iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+ iscsit_stop_nopin_response_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d1ae3df069a4..cc2da086f96e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1078,8 +1078,8 @@ passthrough_parse_cdb(struct se_cmd *cmd,
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
@@ -1101,7 +1101,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
+ if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
@@ -1109,7 +1109,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
+ if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4f4ad6af416c..34cf2c399b39 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -91,7 +91,7 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
return 0;
default:
@@ -418,12 +418,12 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
return -EINVAL;
}
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 61c065702350..0a02492bef70 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1674,9 +1674,9 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
return true;
switch (descr->opcode) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/*
* The pr_ops which are used by the backend modules don't
@@ -1828,9 +1828,9 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RELEASE,
+ .opcode = RELEASE_6,
.cdb_size = 6,
- .usage_bits = {RELEASE, 0x00, 0x00, 0x00,
+ .usage_bits = {RELEASE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -1847,9 +1847,9 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
static struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RESERVE,
+ .opcode = RESERVE_6,
.cdb_size = 6,
- .usage_bits = {RESERVE, 0x00, 0x00, 0x00,
+ .usage_bits = {RESERVE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(descr, cmd))
+ if (!descr->enabled || descr->enabled(descr, cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
case 0x2:
/*
@@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
break;
@@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
*/
if (descr->service_action == requested_sa)
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
}
}
- return 0;
+ return TCM_NO_SENSE;
}
static sense_reason_t
@@ -2267,9 +2273,9 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (!dev->dev_attrib.emulate_pr)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2313,7 +2319,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
*size = get_unaligned_be16(&cdb[7]);
@@ -2322,7 +2328,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = target_scsi2_reservation_release;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
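The spc_rsoc_get_descr() hunks above return TCM_NO_SENSE as soon as an enabled descriptor matches, instead of noting the match and continuing the scan. A compact userspace sketch of the same early-return lookup (types simplified):

#include <stdbool.h>
#include <stddef.h>

struct descr {
	int opcode;
	bool enabled;
};

static const struct descr *find_descr(const struct descr *tbl, size_t n,
				      int opcode)
{
	for (size_t i = 0; i < n; i++) {
		if (tbl[i].opcode == opcode && tbl[i].enabled)
			return &tbl[i];	/* stop at the first match */
	}
	return NULL;
}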
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 496abf8e55e0..2841d14914b7 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -329,6 +329,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
tj_max = intel_tcc_get_tjmax(cpu);
if (tj_max < 0)
return tj_max;
+ tj_max *= 1000;
zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
if (!zonedev)
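The added multiply fixes a units mismatch: intel_tcc_get_tjmax() reports degrees Celsius, while the thermal core expects millidegrees, so a TjMax of 100 must be registered as 100000. A one-line sketch of the conversion:

/* Thermal core temperatures are in millidegrees Celsius. */
static inline int celsius_to_millicelsius(int deg_c)
{
	return deg_c * 1000;	/* 100 degC -> 100000 mdegC */
}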
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 0aaa44b734ca..d0901d8ac85d 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -65,7 +65,6 @@
#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
-#define LVTS_MONINT_CONF 0x0300318C
#define LVTS_MONINT_OFFSET_SENSOR0 0xC
#define LVTS_MONINT_OFFSET_SENSOR1 0x180
@@ -929,7 +928,7 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
* The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS
* register, except we set the bits to enable the interrupt.
*/
- writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base));
+ writel(0, LVTS_MONINT(lvts_ctrl->base));
return 0;
}
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 52e26be8c53d..aed2729f63d0 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -18,6 +18,7 @@
#define SITES_MAX 16
#define TMR_DISABLE 0x0
#define TMR_ME 0x80000000
+#define TMR_CMD BIT(29)
#define TMR_ALPF 0x0c000000
#define TMR_ALPF_V2 0x03000000
#define TMTMIR_DEFAULT 0x0000000f
@@ -356,6 +357,12 @@ static int qoriq_tmu_suspend(struct device *dev)
if (ret)
return ret;
+ if (data->ver > TMU_VER1) {
+ ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD);
+ if (ret)
+ return ret;
+ }
+
clk_disable_unprepare(data->clk);
return 0;
@@ -370,6 +377,12 @@ static int qoriq_tmu_resume(struct device *dev)
if (ret)
return ret;
+ if (data->ver > TMU_VER1) {
+ ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD);
+ if (ret)
+ return ret;
+ }
+
/* Enable monitoring */
return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME);
}
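The suspend/resume hunks pair a set of TMR_CMD on the way down with a clear on the way up, for TMU v2 and later only. A hedged sketch of that symmetric regmap pattern (REGS_TMR offset taken from the driver):

#include <linux/regmap.h>
#include <linux/bits.h>

#define REGS_TMR	0x000		/* mode register offset */
#define TMR_CMD		BIT(29)		/* low-power handshake bit */

/* Assert the bit entering suspend, deassert it on resume. */
static int tmu_low_power(struct regmap *map, bool enter)
{
	return enter ? regmap_set_bits(map, REGS_TMR, TMR_CMD)
		     : regmap_clear_bits(map, REGS_TMR, TMR_CMD);
}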
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index dc1f456736dc..2ec6c9e477d0 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -151,6 +151,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
struct tb_ctl *ctl = req->ctl;
mutex_lock(&ctl->request_queue_lock);
+ if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
+ mutex_unlock(&ctl->request_queue_lock);
+ return;
+ }
+
list_del(&req->list);
clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
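The added guard makes tb_cfg_request_dequeue() idempotent: a request that has already been dequeued (for example by the completion path racing a canceling caller) is left alone, avoiding a double list_del(). A compilable userspace analogue of a flag-guarded dequeue under a lock:

#include <pthread.h>
#include <stdbool.h>

struct req {
	bool active;
	/* list linkage elided */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_dequeue(struct req *r)
{
	pthread_mutex_lock(&queue_lock);
	if (!r->active) {	/* already dequeued: nothing to do */
		pthread_mutex_unlock(&queue_lock);
		return;
	}
	/* list_del(&r->list); */
	r->active = false;
	pthread_mutex_unlock(&queue_lock);
}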
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 1f25529fe05d..361fece3d818 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -93,9 +93,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, nvm_write);
- if (ret)
- goto err_nvm;
+ if (!rt->no_nvm_upgrade) {
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
+ if (ret)
+ goto err_nvm;
+ }
rt->nvm = nvm;
dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 390abcfe7188..8c527af98927 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1305,12 +1305,16 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- tb_retimer_scan(port, true);
-
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
+ * Make the downstream retimers available even if there
+ * is no router connected.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
@@ -1360,6 +1364,14 @@ static void tb_scan_port(struct tb_port *port)
tb_configure_link(port, upstream_port, sw);
/*
+ * Scan for downstream retimers. We only scan them after the
+ * router has been enumerated to avoid issues with certain
+	 * pluggable devices that expect the host to enumerate them
+	 * within a certain timeout.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 442967a6cd52..886e40f680d4 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1680,7 +1680,7 @@ static void serial8250_disable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
- mctrl_gpio_disable_ms(up->gpios);
+ mctrl_gpio_disable_ms_no_sync(up->gpios);
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f44f9d20a974..8918fbd4bddd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -700,7 +700,7 @@ static void atmel_disable_ms(struct uart_port *port)
atmel_port->ms_irq_enabled = false;
- mctrl_gpio_disable_ms(atmel_port->gpios);
+ mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
idr |= ATMEL_US_CTSIC;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9c59ec128bb4..cfeb3f8cf45e 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1608,7 +1608,7 @@ static void imx_uart_shutdown(struct uart_port *port)
imx_uart_dma_exit(sport);
}
- mctrl_gpio_disable_ms(sport->gpios);
+ mctrl_gpio_disable_ms_sync(sport->gpios);
uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index ce0fef7e2c66..be2f130696b3 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -451,6 +451,7 @@ int jsm_uart_port_init(struct jsm_board *brd)
if (!brd->channels[i])
continue;
+ brd->channels[i]->uart_port.dev = &brd->pci_dev->dev;
brd->channels[i]->uart_port.irq = brd->irq;
brd->channels[i]->uart_port.uartclk = 14745600;
brd->channels[i]->uart_port.type = PORT_JSM;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 1b137e068444..3449945493ce 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1746,6 +1746,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
+ /* Disable DM / single-character modes */
+ msm_write(&device->port, 0, UARTDM_DMEN);
+ msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR);
+ msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR);
+ msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR);
+
device->con->write = msm_serial_early_write_dm;
return 0;
}
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 8855688a5b6c..ca55bcc0b611 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -322,11 +322,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
-/**
- * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
- * @gpios: gpios to disable
- */
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync)
{
enum mctrl_gpio_idx i;
@@ -342,10 +338,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
if (!gpios->irq[i])
continue;
- disable_irq(gpios->irq[i]);
+ if (sync)
+ disable_irq(gpios->irq[i]);
+ else
+ disable_irq_nosync(gpios->irq[i]);
}
}
-EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+/**
+ * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms
+ * lines, and wait for any pending IRQ to be processed
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, true);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync);
+
+/**
+ * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the
+ * ms lines, and return immediately
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, false);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync);
void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios)
{
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index fc76910fb105..79e97838ebe5 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -87,9 +87,16 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios);
void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
/*
- * Disable gpio interrupts to report status line changes.
+ * Disable gpio interrupts to report status line changes, and block until
+ * any corresponding IRQ is processed
*/
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios);
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios);
+
+/*
+ * Disable gpio interrupts to report status line changes, and return
+ * immediately
+ */
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios);
/*
* Enable gpio wakeup interrupts to enable wake up source.
@@ -148,7 +155,11 @@ static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
}
-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
+{
+}
+
+static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
{
}
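The split above exists because disable_irq() blocks until any running handler finishes, which can deadlock when the caller already holds a lock the handler takes (the 8250 and atmel callers run under the port lock), while disable_irq_nosync() returns immediately. A hedged sketch of choosing a variant by calling context:

/* Sketch: pick the variant that matches the locking context. */
static void disable_ms(struct mctrl_gpios *gpios, bool atomic_ctx)
{
	if (atomic_ctx)
		mctrl_gpio_disable_ms_no_sync(gpios);	/* no wait */
	else
		mctrl_gpio_disable_ms_sync(gpios);	/* waits for handlers */
}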
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b1ea48f38248..0219135caafa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,6 +104,20 @@ struct plat_sci_reg {
u8 offset, size;
};
+struct sci_suspend_regs {
+ u16 scdl;
+ u16 sccks;
+ u16 scsmr;
+ u16 scscr;
+ u16 scfcr;
+ u16 scsptr;
+ u16 hssrr;
+ u16 scpcr;
+ u16 scpdr;
+ u8 scbrr;
+ u8 semr;
+};
+
struct sci_port_params {
const struct plat_sci_reg regs[SCIx_NR_REGS];
unsigned int fifosize;
@@ -134,6 +148,8 @@ struct sci_port {
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
+ struct reset_control *rstc;
+
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct dma_chan *chan_tx_saved;
struct dma_chan *chan_rx_saved;
@@ -153,6 +169,7 @@ struct sci_port {
int rx_trigger;
struct timer_list rx_fifo_timer;
int rx_fifo_timeout;
+ struct sci_suspend_regs suspend_regs;
u16 hscif_tot;
bool has_rtscts;
@@ -2298,7 +2315,7 @@ static void sci_shutdown(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
s->autorts = false;
- mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios);
uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
@@ -3374,6 +3391,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
}
sp = &sci_ports[id];
+ sp->rstc = rstc;
*dev_id = id;
p->type = SCI_OF_TYPE(data);
@@ -3546,13 +3564,77 @@ static int sci_probe(struct platform_device *dev)
return 0;
}
+static void sci_console_save(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ regs->scdl = sci_serial_in(port, SCDL);
+ if (sci_getreg(port, SCCKS)->size)
+ regs->sccks = sci_serial_in(port, SCCKS);
+ if (sci_getreg(port, SCSMR)->size)
+ regs->scsmr = sci_serial_in(port, SCSMR);
+ if (sci_getreg(port, SCSCR)->size)
+ regs->scscr = sci_serial_in(port, SCSCR);
+ if (sci_getreg(port, SCFCR)->size)
+ regs->scfcr = sci_serial_in(port, SCFCR);
+ if (sci_getreg(port, SCSPTR)->size)
+ regs->scsptr = sci_serial_in(port, SCSPTR);
+ if (sci_getreg(port, SCBRR)->size)
+ regs->scbrr = sci_serial_in(port, SCBRR);
+ if (sci_getreg(port, HSSRR)->size)
+ regs->hssrr = sci_serial_in(port, HSSRR);
+ if (sci_getreg(port, SCPCR)->size)
+ regs->scpcr = sci_serial_in(port, SCPCR);
+ if (sci_getreg(port, SCPDR)->size)
+ regs->scpdr = sci_serial_in(port, SCPDR);
+ if (sci_getreg(port, SEMR)->size)
+ regs->semr = sci_serial_in(port, SEMR);
+}
+
+static void sci_console_restore(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ sci_serial_out(port, SCDL, regs->scdl);
+ if (sci_getreg(port, SCCKS)->size)
+ sci_serial_out(port, SCCKS, regs->sccks);
+ if (sci_getreg(port, SCSMR)->size)
+ sci_serial_out(port, SCSMR, regs->scsmr);
+ if (sci_getreg(port, SCSCR)->size)
+ sci_serial_out(port, SCSCR, regs->scscr);
+ if (sci_getreg(port, SCFCR)->size)
+ sci_serial_out(port, SCFCR, regs->scfcr);
+ if (sci_getreg(port, SCSPTR)->size)
+ sci_serial_out(port, SCSPTR, regs->scsptr);
+ if (sci_getreg(port, SCBRR)->size)
+ sci_serial_out(port, SCBRR, regs->scbrr);
+ if (sci_getreg(port, HSSRR)->size)
+ sci_serial_out(port, HSSRR, regs->hssrr);
+ if (sci_getreg(port, SCPCR)->size)
+ sci_serial_out(port, SCPCR, regs->scpcr);
+ if (sci_getreg(port, SCPDR)->size)
+ sci_serial_out(port, SCPDR, regs->scpdr);
+ if (sci_getreg(port, SEMR)->size)
+ sci_serial_out(port, SEMR, regs->semr);
+}
+
static __maybe_unused int sci_suspend(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
uart_suspend_port(&sci_uart_driver, &sport->port);
+ if (!console_suspend_enabled && uart_console(&sport->port))
+ sci_console_save(sport);
+ else
+ return reset_control_assert(sport->rstc);
+ }
+
return 0;
}
@@ -3560,8 +3642,18 @@ static __maybe_unused int sci_resume(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
+ if (!console_suspend_enabled && uart_console(&sport->port)) {
+ sci_console_restore(sport);
+ } else {
+ int ret = reset_control_deassert(sport->rstc);
+
+ if (ret)
+ return ret;
+ }
+
uart_resume_port(&sci_uart_driver, &sport->port);
+ }
return 0;
}
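The sh-sci suspend policy above hinges on one condition: a port serving as a console with no_console_suspend set must keep working, so its registers are saved and its reset line stays deasserted; any other port is simply put into reset. A small sketch of that predicate (both helpers are standard serial-core API):

#include <linux/serial_core.h>
#include <linux/console.h>

/* True when registers must be saved instead of asserting the reset. */
static bool keep_console_alive(struct uart_port *port)
{
	return !console_suspend_enabled && uart_console(port);
}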
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5904a2d4cefa..054a8e630ace 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -563,8 +563,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state)
static int sifive_serial_startup(struct uart_port *port)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ unsigned long flags;
+ uart_port_lock_irqsave(&ssp->port, &flags);
__ssp_enable_rxwm(ssp);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
return 0;
}
@@ -572,9 +575,12 @@ static int sifive_serial_startup(struct uart_port *port)
static void sifive_serial_shutdown(struct uart_port *port)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
+ unsigned long flags;
+ uart_port_lock_irqsave(&ssp->port, &flags);
__ssp_disable_rxwm(ssp);
__ssp_disable_txwm(ssp);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
/**
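The sifive hunks wrap the watermark toggles in uart_port_lock_irqsave() because the same bits are touched from the interrupt path. A minimal sketch of the locking shape (the __ssp_* helpers are the driver's own):

static void ssp_set_watermarks(struct uart_port *port, bool enable)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	/* __ssp_enable_rxwm() / __ssp_disable_{rx,tx}wm() go here */
	uart_port_unlock_irqrestore(port, flags);
}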
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 0854ad8c90cd..ad06b760cfca 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -944,7 +944,7 @@ static void stm32_usart_enable_ms(struct uart_port *port)
static void stm32_usart_disable_ms(struct uart_port *port)
{
- mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios);
}
/* Transmit stop */
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 0bd6544e30a6..791e2f1f7c0b 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -193,13 +193,12 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return -EFAULT;
/*
- * TIOCL_SELCLEAR, TIOCL_SELPOINTER and TIOCL_SELMOUSEREPORT are OK to
- * use without CAP_SYS_ADMIN as they do not modify the selection.
+ * TIOCL_SELCLEAR and TIOCL_SELPOINTER are OK to use without
+ * CAP_SYS_ADMIN as they do not modify the selection.
*/
switch (v.sel_mode) {
case TIOCL_SELCLEAR:
case TIOCL_SELPOINTER:
- case TIOCL_SELMOUSEREPORT:
break;
default:
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..47ec4e4e4a2a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -5658,6 +5659,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -7199,8 +7202,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
return err;
}
@@ -8384,6 +8385,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by one unit (100 µs).
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8394,6 +8420,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 5ea3f9beb1bd..2436b9454480 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -1060,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1070,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1098,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1517,6 +1522,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1687,6 +1700,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1695,6 +1714,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1972,6 +1994,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -2010,13 +2033,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
ufshcd_pltfrm_remove(pdev);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2162,6 +2179,7 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index d0b3df221503..3c6fe5132190 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -192,6 +192,7 @@ struct exynos_ufs_drv_data {
struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
+ int (*suspend)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..a455a95f65fc 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -125,7 +125,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 1b19b5647495..69c1df0f4ca5 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -131,15 +131,12 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
vmbus_device_unregister(channel->device_obj);
}
-/* Sysfs API to allow mmap of the ring buffers
+/* Function used for mmap of the ring buffer sysfs interface.
* The ring buffer is allocated as contiguous memory by vmbus_open
*/
-static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *attr,
- struct vm_area_struct *vma)
+static int
+hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma)
{
- struct vmbus_channel *channel
- = container_of(kobj, struct vmbus_channel, kobj);
void *ring_buffer = page_address(channel->ringbuffer_page);
if (channel->state != CHANNEL_OPENED_STATE)
@@ -149,15 +146,6 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
-static const struct bin_attribute ring_buffer_bin_attr = {
- .attr = {
- .name = "ring",
- .mode = 0600,
- },
- .size = 2 * SZ_2M,
- .mmap = hv_uio_ring_mmap,
-};
-
/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
@@ -178,8 +166,7 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
/* Disable interrupts on sub channel */
new_sc->inbound.ring_buffer->interrupt_mask = 1;
set_channel_read_mode(new_sc, HV_CALL_ISR);
-
- ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
+ ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
if (ret) {
dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
vmbus_close(new_sc);
@@ -350,10 +337,18 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
- if (ret)
- dev_notice(&dev->device,
- "sysfs create ring bin file failed; %d\n", ret);
+ /*
+ * This internally calls sysfs_update_group, which returns a non-zero value if it executes
+ * before sysfs_create_group. This is expected as the 'ring' will be created later in
+	 * vmbus_device_register() -> vmbus_add_channel_kobj(). Thus, there is no need to check
+	 * the return value and print a warning.
+	 *
+	 * Creating/exposing sysfs in driver probe is not encouraged as it can lead to race
+	 * conditions with userspace. For backward compatibility, the "ring" sysfs file cannot
+	 * be removed or decoupled from the uio_hv_generic probe. Userspace programs can make
+	 * use of inotify APIs to make sure that the ring is created.
+ */
+ hv_create_ring_sysfs(channel, hv_uio_ring_mmap);
hv_set_drvdata(dev, pdata);
@@ -375,7 +370,7 @@ hv_uio_remove(struct hv_device *dev)
if (!pdata)
return;
- sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ hv_remove_ring_sysfs(dev->channel);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index fd1beb10bba7..19101ff1cf1b 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1963,6 +1963,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
unsigned int bit;
unsigned long reg;
+ local_bh_disable();
spin_lock_irqsave(&priv_dev->lock, flags);
reg = readl(&priv_dev->regs->usb_ists);
@@ -2004,6 +2005,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
irqend:
writel(~0, &priv_dev->regs->ep_ien);
spin_unlock_irqrestore(&priv_dev->lock, flags);
+ local_bh_enable();
return ret;
}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 97edf767ecee..d471409eb66c 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -139,6 +139,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
(portsc & PORT_CHANGE_BITS), port_regs);
}
+static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
+{
+ struct cdns *cdns = dev_get_drvdata(pdev->dev);
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+ u32 val;
+
+ if (!cdns->override_apb_timeout)
+ return;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;
+
+ val = le32_to_cpu(readl(reg));
+ val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
+ writel(cpu_to_le32(val), reg);
+}
+
static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
__le32 __iomem *reg;
@@ -1773,6 +1793,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
pdev->rev_cap = reg;
+ pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);
+
dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
readl(&pdev->rev_cap->ctrl_revision),
readl(&pdev->rev_cap->rtl_revision),
@@ -1798,6 +1820,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev)
pdev->hci_version = HC_VERSION(pdev->hcc_params);
pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+ /*
+ * Override the APB timeout value to give the controller more time for
+ * enabling UTMI clock and synchronizing APB and UTMI clock domains.
+	 * This fix is platform specific and is required to fix an issue with
+	 * reading an incorrect value from the PORTSC register after resuming
+	 * from the L1 state.
+ */
+ cdnsp_set_apb_timeout_value(pdev);
+
cdnsp_get_rev_cap(pdev);
/* Make sure the Device Controller is halted. */
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index 84887dfea763..12534be52f39 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -520,6 +520,9 @@ struct cdnsp_rev_cap {
#define REG_CHICKEN_BITS_2_OFFSET 0x48
#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+#define REG_CHICKEN_BITS_3_OFFSET 0x4C
+#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))
+
/* XBUF Extended Capability ID. */
#define XBUF_CAP_ID 0xCB
#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
@@ -1357,6 +1360,7 @@ struct cdnsp_port {
* @rev_cap: Controller Capabilities Registers.
* @hcs_params1: Cached register copies of read-only HCSPARAMS1
* @hcc_params: Cached register copies of read-only HCCPARAMS1
+ * @rtl_revision: Cached controller rtl revision.
* @setup: Temporary buffer for setup packet.
* @ep0_preq: Internal allocated request used during enumeration.
* @ep0_stage: ep0 stage during enumeration process.
@@ -1411,6 +1415,8 @@ struct cdnsp_device {
__u32 hcs_params1;
__u32 hcs_params3;
__u32 hcc_params;
+ #define RTL_REVISION_NEW_LPM 0x2700
+ __u32 rtl_revision;
/* Lock used in interrupt thread context. */
spinlock_t lock;
struct usb_ctrlrequest setup;
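CHICKEN_APB_TIMEOUT_SET() above is a plain masked insert into bits 21:0; the caller is expected to pass a value that already fits the field. A compilable sketch of the same operation:

#include <stdint.h>

#define APB_TIMEOUT_MASK 0x3fffffu	/* GENMASK(21, 0) */

/* val is assumed to fit in bits 21:0, as in the macro above. */
static uint32_t apb_timeout_set(uint32_t reg, uint32_t val)
{
	return (reg & ~APB_TIMEOUT_MASK) | val;
}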
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index a51144504ff3..8c361b8394e9 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -28,6 +28,8 @@
#define PCI_DRIVER_NAME "cdns-pci-usbssp"
#define PLAT_DRIVER_NAME "cdns-usbssp"
+#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20
+
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
/*
@@ -139,6 +141,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
cdnsp->otg_irq = pdev->irq;
}
+ /*
+	 * Cadence PCI-based platforms require a longer APB timeout to fix
+	 * a domain clock synchronization issue after resuming the
+	 * controller from the L1 state.
+ */
+ cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE;
+ pci_set_drvdata(pdev, cdnsp);
+
if (pci_is_enabled(func)) {
cdnsp->dev = dev;
cdnsp->gadget_init = cdnsp_gadget_init;
@@ -148,8 +158,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
goto free_cdnsp;
}
- pci_set_drvdata(pdev, cdnsp);
-
device_wakeup_enable(&pdev->dev);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 46852529499d..fd06cb85c4ea 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
writel(db_value, reg_addr);
- cdnsp_force_l0_go(pdev);
+ if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
+ cdnsp_force_l0_go(pdev);
/* Doorbell was set. */
return true;
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 57d47348dc19..ac30ee21309d 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -79,6 +79,8 @@ struct cdns3_platform_data {
* @pdata: platform data from glue layer
* @lock: spinlock structure
* @xhci_plat_data: xhci private data structure pointer
+ * @override_apb_timeout: holds the APB timeout value. For value 0 the default
+ *			value in CHICKEN_BITS_3 is preserved.
* @gadget_init: pointer to gadget initialization function
*/
struct cdns {
@@ -117,6 +119,7 @@ struct cdns {
struct cdns3_platform_data *pdata;
spinlock_t lock;
struct xhci_plat_priv *xhci_plat_data;
+ u32 override_apb_timeout;
int (*gadget_init)(struct cdns *cdns);
};
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 1a7fc638213e..4f8bfd242b59 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -336,6 +336,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
return ret;
}
+static void ci_hdrc_imx_disable_regulator(void *arg)
+{
+ struct ci_hdrc_imx_data *data = arg;
+
+ regulator_disable(data->hsic_pad_regulator);
+}
+
static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
struct ci_hdrc_imx_data *data;
@@ -394,6 +401,13 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
"Failed to enable HSIC pad regulator\n");
goto err_put;
}
+ ret = devm_add_action_or_reset(dev,
+ ci_hdrc_imx_disable_regulator, data);
+ if (ret) {
+ dev_err(dev,
+ "Failed to add regulator devm action\n");
+ goto err_put;
+ }
}
}
@@ -432,11 +446,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
ret = imx_get_clks(dev);
if (ret)
- goto disable_hsic_regulator;
+ goto qos_remove_request;
ret = imx_prepare_enable_clks(dev);
if (ret)
- goto disable_hsic_regulator;
+ goto qos_remove_request;
ret = clk_prepare_enable(data->clk_wakeup);
if (ret)
@@ -470,7 +484,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) {
pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL;
data->override_phy_control = true;
- usb_phy_init(pdata.usb_phy);
+ ret = usb_phy_init(pdata.usb_phy);
+ if (ret) {
+ dev_err(dev, "Failed to init phy\n");
+ goto err_clk;
+ }
}
if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
@@ -479,7 +497,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc init failed, ret=%d\n", ret);
- goto err_clk;
+ goto phy_shutdown;
}
data->ci_pdev = ci_hdrc_add_device(dev,
@@ -488,7 +506,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n");
- goto err_clk;
+ goto phy_shutdown;
}
if (data->usbmisc_data) {
@@ -522,19 +540,20 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
+phy_shutdown:
+ if (data->override_phy_control)
+ usb_phy_shutdown(data->phy);
err_clk:
clk_disable_unprepare(data->clk_wakeup);
err_wakeup_clk:
imx_disable_unprepare_clks(dev);
-disable_hsic_regulator:
- if (data->hsic_pad_regulator)
- /* don't overwrite original ret (cf. EPROBE_DEFER) */
- regulator_disable(data->hsic_pad_regulator);
+qos_remove_request:
if (pdata.flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
data->ci_pdev = NULL;
err_put:
- put_device(data->usbmisc_data->dev);
+ if (data->usbmisc_data)
+ put_device(data->usbmisc_data->dev);
return ret;
}
@@ -556,10 +575,9 @@ static void ci_hdrc_imx_remove(struct platform_device *pdev)
clk_disable_unprepare(data->clk_wakeup);
if (data->plat_data->flags & CI_HDRC_PMQOS)
cpu_latency_qos_remove_request(&data->pm_qos_req);
- if (data->hsic_pad_regulator)
- regulator_disable(data->hsic_pad_regulator);
}
- put_device(data->usbmisc_data->dev);
+ if (data->usbmisc_data)
+ put_device(data->usbmisc_data->dev);
}
static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
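The regulator fix above swaps manual unwinding for devm_add_action_or_reset(), which runs the callback on probe failure and again (in reverse registration order) on unbind, so both the error-path and remove-path regulator_disable() calls can go. A hedged sketch of the pattern:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void disable_reg_action(void *data)
{
	regulator_disable(data);
}

/* Enable a regulator and tie its disable to the device lifetime. */
static int claim_regulator(struct device *dev, struct regulator *reg)
{
	int ret = regulator_enable(reg);

	if (ret)
		return ret;
	/* On failure this disables immediately; otherwise it runs at detach. */
	return devm_add_action_or_reset(dev, disable_reg_action, reg);
}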
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 86ee39db013f..16e7fa4d488d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file)
rv = -EBUSY;
goto out;
}
-
+ smp_rmb(); /* ordered against wdm_wwan_port_stop() */
rv = usb_autopm_get_interface(desc->intf);
if (rv < 0) {
dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
@@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = {
static int wdm_wwan_port_start(struct wwan_port *port)
{
struct wdm_device *desc = wwan_port_get_drvdata(port);
+ int rv;
/* The interface is both exposed via the WWAN framework and as a
* legacy usbmisc chardev. If chardev is already open, just fail
@@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port)
wwan_port_txon(port);
/* Start getting events */
- return usb_submit_urb(desc->validity, GFP_KERNEL);
+ rv = usb_submit_urb(desc->validity, GFP_KERNEL);
+ if (rv < 0) {
+ wwan_port_txoff(port);
+ desc->manage_power(desc->intf, 0);
+ /* this must be last lest we race with chardev open */
+ clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ }
+
+ return rv;
}
static void wdm_wwan_port_stop(struct wwan_port *port)
@@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port)
poison_urbs(desc);
desc->manage_power(desc->intf, 0);
clear_bit(WDM_READ, &desc->flags);
- clear_bit(WDM_WWAN_IN_USE, &desc->flags);
unpoison_urbs(desc);
+ smp_wmb(); /* ordered against wdm_open() */
+ /* this must be last lest we open a poisoned device */
+ clear_bit(WDM_WWAN_IN_USE, &desc->flags);
}
static void wdm_wwan_port_tx_complete(struct urb *urb)
@@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb)
struct sk_buff *skb = urb->context;
struct wdm_device *desc = skb_shinfo(skb)->destructor_arg;
- usb_autopm_put_interface(desc->intf);
+ usb_autopm_put_interface_async(desc->intf);
wwan_port_txon(desc->wwanp);
kfree_skb(skb);
}
@@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
req->wValue = 0;
- req->wIndex = desc->inum;
+ req->wIndex = desc->inum; /* already converted */
req->wLength = cpu_to_le16(skb->len);
skb_shinfo(skb)->destructor_arg = desc;
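The barrier hunks make WDM_WWAN_IN_USE the publication point between the WWAN stop path and the chardev open path: stop() repairs (unpoisons) the URBs, then smp_wmb(), then clears the flag; open() tests the flag, then smp_rmb(), then touches the URBs, so an opener can never observe the flag clear while the device is still poisoned. A compilable C11 analogue where release/acquire plays the role of the two barriers:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool in_use;
static int device_poisoned;	/* stands in for the URB state */

static void port_stop(void)
{
	device_poisoned = 0;	/* repair state first... */
	/* ...then publish: release orders the store above before this one */
	atomic_store_explicit(&in_use, false, memory_order_release);
}

static bool chardev_open(void)
{
	/* acquire orders this load before the state read below */
	if (atomic_load_explicit(&in_use, memory_order_acquire))
		return false;	/* busy via the WWAN framework */
	return device_poisoned == 0;
}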
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 34e46ef308ab..66f3d9324ba2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -482,6 +482,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
u8 *buffer;
u8 tag;
int rv;
+ long wait_rv;
+ unsigned long expire;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
@@ -511,16 +513,18 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
}
if (data->iin_ep_present) {
- rv = wait_event_interruptible_timeout(
+ expire = msecs_to_jiffies(file_data->timeout);
+ wait_rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&data->iin_data_valid) != 0,
- file_data->timeout);
- if (rv < 0) {
- dev_dbg(dev, "wait interrupted %d\n", rv);
+ expire);
+ if (wait_rv < 0) {
+ dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
+ rv = wait_rv;
goto exit;
}
- if (rv == 0) {
+ if (wait_rv == 0) {
dev_dbg(dev, "wait timed out\n");
rv = -ETIMEDOUT;
goto exit;
@@ -539,6 +543,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
+ rv = 0;
+
exit:
/* bump interrupt bTag */
data->iin_bTag += 1;
@@ -602,9 +608,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
{
struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
- int rv;
u32 timeout;
unsigned long expire;
+ long wait_rv;
if (!data->iin_ep_present) {
dev_dbg(dev, "no interrupt endpoint present\n");
@@ -618,25 +624,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
mutex_unlock(&data->io_mutex);
- rv = wait_event_interruptible_timeout(
- data->waitq,
- atomic_read(&file_data->srq_asserted) != 0 ||
- atomic_read(&file_data->closing),
- expire);
+ wait_rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&file_data->srq_asserted) != 0 ||
+ atomic_read(&file_data->closing),
+ expire);
mutex_lock(&data->io_mutex);
/* Note! disconnect or close could be called in the meantime */
if (atomic_read(&file_data->closing) || data->zombie)
- rv = -ENODEV;
+ return -ENODEV;
- if (rv < 0) {
- /* dev can be invalid now! */
- pr_debug("%s - wait interrupted %d\n", __func__, rv);
- return rv;
+ if (wait_rv < 0) {
+ dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv);
+ return wait_rv;
}
- if (rv == 0) {
+ if (wait_rv == 0) {
dev_dbg(dev, "%s - wait timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -830,6 +835,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
unsigned long expire;
int bufcount = 1;
int again = 0;
+ long wait_rv;
/* mutex already locked */
@@ -942,19 +948,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
if (!(flags & USBTMC_FLAG_ASYNC)) {
dev_dbg(dev, "%s: before wait time %lu\n",
__func__, expire);
- retval = wait_event_interruptible_timeout(
+ wait_rv = wait_event_interruptible_timeout(
file_data->wait_bulk_in,
usbtmc_do_transfer(file_data),
expire);
- dev_dbg(dev, "%s: wait returned %d\n",
- __func__, retval);
+ dev_dbg(dev, "%s: wait returned %ld\n",
+ __func__, wait_rv);
- if (retval <= 0) {
- if (retval == 0)
- retval = -ETIMEDOUT;
+ if (wait_rv < 0) {
+ retval = wait_rv;
goto error;
}
+
+ if (wait_rv == 0) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+
}
urb = usb_get_from_anchor(&file_data->in_anchor);
@@ -1380,7 +1391,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
if (!buffer)
return -ENOMEM;
- mutex_lock(&data->io_mutex);
+ retval = mutex_lock_interruptible(&data->io_mutex);
+ if (retval < 0)
+ goto exit_nolock;
+
if (data->zombie) {
retval = -ENODEV;
goto exit;
@@ -1503,6 +1517,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
exit:
mutex_unlock(&data->io_mutex);
+exit_nolock:
kfree(buffer);
return retval;
}
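The usbtmc hunks all correct the same pattern: wait_event_interruptible_timeout() returns a long (negative on signal, 0 on timeout, remaining jiffies otherwise), so storing it in an int conflated outcomes; the get_stb path additionally converts its millisecond timeout with msecs_to_jiffies() and resets rv to 0 on success so the positive byte count does not leak to the caller. A hedged sketch of the three-way handling:

#include <linux/wait.h>
#include <linux/errno.h>

static long wait_for_condition(wait_queue_head_t *wq, bool *cond,
			       unsigned long expire)
{
	long wait_rv = wait_event_interruptible_timeout(*wq, *cond, expire);

	if (wait_rv < 0)
		return wait_rv;		/* interrupted by a signal */
	if (wait_rv == 0)
		return -ETIMEDOUT;	/* timed out */
	return 0;			/* condition met */
}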
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8efbacc5bc34..53d68d20fb62 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -369,6 +369,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+ /* SanDisk Corp. SanDisk 3.2Gen1 */
+ { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT },
+
+ /* SanDisk Extreme 55AE */
+ { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
+
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
@@ -383,6 +389,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ /* Silicon Motion Flash Drive */
+ { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Sound Devices USBPre2 */
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
@@ -539,6 +548,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* VLI disk */
+ { USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM },
+
/* Raydium Touchscreen */
{ USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index aaa39e663f60..27eae4cf223d 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1164,6 +1164,9 @@ struct dwc3_scratchpad_array {
* @gsbuscfg0_reqinfo: store GSBUSCFG0.DATRDREQINFO, DESRDREQINFO,
* DATWRREQINFO, and DESWRREQINFO value passed from
* glue driver.
+ * @wakeup_pending_funcs: Bitmap of interfaces that have requested function
+ *			wakeup, where each bit position represents an
+ *			interface_id.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1394,6 +1397,7 @@ struct dwc3 {
int num_ep_resized;
struct dentry *debug_root;
u32 gsbuscfg0_reqinfo;
+ u32 wakeup_pending_funcs;
};
#define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 052852f80146..54a4ee2b90b7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = {
{}
};
+/*
+ * Intel Merrifield SoC uses these endpoints for tracing and they cannot
+ * be re-allocated if being used because the side band flow control signals
+ * are hard wired to certain endpoints:
+ * - 1 High BW Bulk IN (IN#1) (RTIT)
+ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8)
+ */
+static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 };
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints),
PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index a33a42ba0249..4ca7f6240d07 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -207,15 +207,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
skip_usb3_phy:
/* ulpi reset via gpio-modepin or gpio-framework driver */
- reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(reset_gpio)) {
return dev_err_probe(dev, PTR_ERR(reset_gpio),
"Failed to request reset GPIO\n");
}
if (reset_gpio) {
- /* Toggle ulpi to reset the phy. */
- gpiod_set_value_cansleep(reset_gpio, 1);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(reset_gpio, 0);
usleep_range(5000, 10000);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89a4dc8ebf94..36384a49618e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -276,8 +276,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
return ret;
}
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
-
/**
* dwc3_send_gadget_ep_cmd - issue an endpoint command
* @dep: the endpoint to which the command is going to be issued
@@ -547,6 +545,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
u32 cmd;
int i;
int ret;
@@ -563,8 +562,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
return ret;
/* Reset resource allocation flags */
- for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
- dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ for (i = resource_index; i < dwc->num_eps; i++) {
+ dep = dwc->eps[i];
+ if (!dep)
+ continue;
+
+ dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ }
return 0;
}
@@ -751,9 +755,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
dwc->last_fifo_depth = fifo_depth;
/* Clear existing TXFIFO for all IN eps except ep0 */
- for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
- num += 2) {
+ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) {
dep = dwc->eps[num];
+ if (!dep)
+ continue;
+
/* Don't change TXFRAMNUM on usb31 version */
size = DWC3_IP_IS(DWC3) ? 0 :
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
@@ -2351,10 +2357,8 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
return __dwc3_gadget_get_frame(dwc);
}
-static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
- int retries;
-
int ret;
u32 reg;
@@ -2382,8 +2386,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
return -EINVAL;
}
- if (async)
- dwc3_gadget_enable_linksts_evts(dwc, true);
+ dwc3_gadget_enable_linksts_evts(dwc, true);
ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
if (ret < 0) {
@@ -2402,27 +2405,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async)
/*
* Since link status change events are enabled we will receive
- * an U0 event when wakeup is successful. So bail out.
+	 * a U0 event when wakeup is successful.
*/
- if (async)
- return 0;
-
- /* poll until Link State changes to ON */
- retries = 20000;
-
- while (retries--) {
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-
- /* in HS, means ON */
- if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
- break;
- }
-
- if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
- dev_err(dwc->dev, "failed to send remote wakeup\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -2443,7 +2427,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
spin_unlock_irqrestore(&dwc->lock, flags);
return -EINVAL;
}
- ret = __dwc3_gadget_wakeup(dwc, true);
+ ret = __dwc3_gadget_wakeup(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2471,14 +2455,10 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
*/
link_state = dwc3_gadget_get_link_state(dwc);
if (link_state == DWC3_LINK_STATE_U3) {
- ret = __dwc3_gadget_wakeup(dwc, false);
- if (ret) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- return -EINVAL;
- }
- dwc3_resume_gadget(dwc);
- dwc->suspended = false;
- dwc->link_state = DWC3_LINK_STATE_U0;
+ dwc->wakeup_pending_funcs |= BIT(intf_id);
+ ret = __dwc3_gadget_wakeup(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return ret;
}
ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
@@ -3703,6 +3683,8 @@ out:
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
dep = dwc->eps[i];
+ if (!dep)
+ continue;
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
@@ -3852,6 +3834,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
u8 epnum = event->endpoint_number;
dep = dwc->eps[epnum];
+ if (!dep) {
+ dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum);
+ return;
+ }
if (!(dep->flags & DWC3_EP_ENABLED)) {
if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
@@ -4300,6 +4286,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
unsigned int pwropt;
+ int ret;
+ int intf_id;
/*
* WORKAROUND: DWC3 < 2.50a have an issue when configured without
@@ -4375,7 +4363,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
switch (next) {
case DWC3_LINK_STATE_U0:
- if (dwc->gadget->wakeup_armed) {
+ if (dwc->gadget->wakeup_armed || dwc->wakeup_pending_funcs) {
dwc3_gadget_enable_linksts_evts(dwc, false);
dwc3_resume_gadget(dwc);
dwc->suspended = false;
@@ -4398,6 +4386,18 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
}
dwc->link_state = next;
+
+ /* Proceed with function wakeup for any interfaces that have requested it */
+ while (dwc->wakeup_pending_funcs && (next == DWC3_LINK_STATE_U0)) {
+ intf_id = ffs(dwc->wakeup_pending_funcs) - 1;
+ ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION,
+ DWC3_DGCMDPAR_DN_FUNC_WAKE |
+ DWC3_DGCMDPAR_INTF_SEL(intf_id));
+ if (ret)
+ dev_err(dwc->dev, "Failed to send DN wake for intf %d\n", intf_id);
+
+ dwc->wakeup_pending_funcs &= ~BIT(intf_id);
+ }
}
static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
@@ -4564,6 +4564,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
if (!count)
return IRQ_NONE;
+ if (count > evt->length) {
+ dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n",
+ count, evt->length);
+ return IRQ_NONE;
+ }
+
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
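
The gadget.c hunks above replace the old synchronous wakeup poll with a deferred scheme: dwc3_gadget_func_wakeup() records the requesting interface in the wakeup_pending_funcs bitmask, and the U0 link-state handler drains the bitmask with one DEV_NOTIFICATION per set bit. A minimal userspace model of that defer/drain pattern (plain C, names hypothetical, not the driver code itself):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static uint32_t wakeup_pending_funcs;	/* one bit per interface id */

/* Request side: record the interface; in the real driver,
 * __dwc3_gadget_wakeup() kicks off link recovery here. */
static void request_func_wakeup(int intf_id)
{
	wakeup_pending_funcs |= 1u << intf_id;
}

/* U0 link-state event: drain one notification per pending interface. */
static void on_link_state_u0(void)
{
	while (wakeup_pending_funcs) {
		int intf_id = ffs(wakeup_pending_funcs) - 1;

		printf("send DEV_NOTIFICATION for intf %d\n", intf_id);
		wakeup_pending_funcs &= ~(1u << intf_id);
	}
}

int main(void)
{
	request_func_wakeup(2);
	request_func_wakeup(0);
	on_link_state_u0();	/* notifies intf 0, then intf 2 */
	return 0;
}
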
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 869ad99afb48..8dbc132a505e 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (f->get_status) {
status = f->get_status(f);
+
if (status < 0)
break;
- } else {
- /* Set D0 and D1 bits based on func wakeup capability */
- if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) {
- status |= USB_INTRF_STAT_FUNC_RW_CAP;
- if (f->func_wakeup_armed)
- status |= USB_INTRF_STAT_FUNC_RW;
- }
+
+ /* if D5 is not set, then device is not wakeup capable */
+ if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP))
+ status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW);
}
put_unaligned_le16(status & 0x0000ffff, req->buf);
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 80841de845b0..027226325039 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f)
gether_resume(&ecm->port);
}
+static int ecm_get_status(struct usb_function *f)
+{
+ return (f->func_wakeup_armed ? USB_INTRF_STAT_FUNC_RW : 0) |
+ USB_INTRF_STAT_FUNC_RW_CAP;
+}
+
static void ecm_free(struct usb_function *f)
{
struct f_ecm *ecm;
@@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
ecm->port.func.disable = ecm_disable;
ecm->port.func.free_func = ecm_free;
ecm->port.func.suspend = ecm_suspend;
+ ecm->port.func.get_status = ecm_get_status;
ecm->port.func.resume = ecm_resume;
return &ecm->port.func;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 2eae8fc2e0db..94d478b6bcd3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2142,8 +2142,8 @@ static int do_scsi_command(struct fsg_common *common)
* of Posix locks.
*/
case FORMAT_UNIT:
- case RELEASE:
- case RESERVE:
+ case RELEASE_6:
+ case RESERVE_6:
case SEND_DIAGNOSTIC:
default:
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
index 12e866fb311d..0a800ba53816 100644
--- a/drivers/usb/gadget/function/f_midi2.c
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -475,7 +475,7 @@ static void reply_ump_stream_ep_info(struct f_midi2_ep *ep)
/* reply a UMP EP device info */
static void reply_ump_stream_ep_device(struct f_midi2_ep *ep)
{
- struct snd_ump_stream_msg_devince_info rep = {
+ struct snd_ump_stream_msg_device_info rep = {
.type = UMP_MSG_TYPE_STREAM,
.status = UMP_STREAM_MSG_STATUS_DEVICE_INFO,
.manufacture_id = ep->info.manufacturer,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 573109ca5b79..a09f72772e6e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
d->vhub = vhub;
d->index = idx;
d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
+ if (!d->name)
+ return -ENOMEM;
+
d->regs = vhub->regs + 0x100 + 0x10 * idx;
ast_vhub_init_ep0(vhub, &d->ep0, d);
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index c7fdbc55fb0b..2957316fd3d0 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
val = xudc_readl(xudc, CTRL);
val &= ~CTRL_RUN;
xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
}
dev_info(xudc->dev, "ep %u disabled\n", ep->index);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0881fdd1823e..dcf31a592f5d 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi)
usb_put_hcd(hcd);
}
+static const struct spi_device_id max3421_spi_ids[] = {
+ { "max3421" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, max3421_spi_ids);
+
static const struct of_device_id max3421_of_match_table[] = {
{ .compatible = "maxim,max3421", },
{},
@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table);
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
+ .id_table = max3421_spi_ids,
.driver = {
.name = "max3421-hcd",
.of_match_table = max3421_of_match_table,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 900ea0d368e0..9f0a6b27e47c 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_loongson(struct usb_hcd *hcd)
+{
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ /*
+ * Loongson's LS7A OHCI controller (rev 0x02) has a
+ * flaw: the MMIO registers at offsets 0x60/0x64 are
+ * treated as a legacy PS/2-compatible keyboard/mouse
+ * interface. Since OHCI only uses a 4KB BAR resource,
+ * LS7A OHCI's 32KB BAR wraps around (the 2nd 4KB of
+ * BAR space is internally the same as the 1st 4KB).
+ * So add a 4KB offset (0x1000) to the OHCI registers
+ * as a quirk.
+ */
+ if (pdev->revision == 0x2)
+ hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */
+
+ return 0;
+}
+
static int ohci_quirk_qemu(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -225,6 +244,10 @@ static const struct pci_device_id ohci_pci_quirks[] = {
.driver_data = (unsigned long)ohci_quirk_amd700,
},
{
+ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24),
+ .driver_data = (unsigned long)ohci_quirk_loongson,
+ },
+ {
.vendor = PCI_VENDOR_ID_APPLE,
.device = 0x003f,
.subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index a7c934404ebc..62318291f566 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
}
/* Get and enable clock if any specified */
- uhci->clk = devm_clk_get(&pdev->dev, NULL);
+ uhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(uhci->clk)) {
ret = PTR_ERR(uhci->clk);
goto err_rmr;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index fd7895b24367..0d4ce5734165 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -823,6 +823,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
dma_addr_t deq;
union xhci_trb *evt;
+ enum evtreturn ret = EVT_DONE;
u32 ctrl, portsc;
bool update_erdp = false;
@@ -909,6 +910,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -927,7 +929,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
lo_hi_writeq(deq, &dbc->regs->erdp);
}
- return EVT_DONE;
+ return ret;
}
static void xhci_dbc_handle_events(struct work_struct *work)
@@ -936,6 +938,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
struct xhci_dbc *dbc;
unsigned long flags;
unsigned int poll_interval;
+ unsigned long busypoll_timelimit;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
poll_interval = dbc->poll_interval;
@@ -954,11 +957,21 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
- /* set fast poll rate if there are pending data transfers */
+ /*
+ * Set the fast poll rate if there are pending out transfers, or
+ * if a transfer was recently processed.
+ */
+ busypoll_timelimit = dbc->xfer_timestamp +
+ msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT);
+
if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
- !list_empty(&dbc->eps[BULK_IN].list_pending))
+ time_is_after_jiffies(busypoll_timelimit))
poll_interval = 0;
break;
+ case EVT_XFER_DONE:
+ dbc->xfer_timestamp = jiffies;
+ poll_interval = 0;
+ break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index 9dc8f4d8077c..47ac72c2286d 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -96,6 +96,7 @@ struct dbc_ep {
#define DBC_WRITE_BUF_SIZE 8192
#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
#define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */
+#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */
/*
* Private structure for DbC hardware state:
*/
@@ -142,6 +143,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
unsigned int poll_interval; /* ms */
+ unsigned long xfer_timestamp;
unsigned resume_required:1;
struct dbc_ep eps[2];
@@ -187,6 +189,7 @@ struct dbc_request {
enum evtreturn {
EVT_ERR = -1,
EVT_DONE,
+ EVT_XFER_DONE,
EVT_GSER,
EVT_DISC,
};
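
The DbC hunks above keep the event worker busy-polling for DBC_XFER_INACTIVITY_TIMEOUT milliseconds after the last completed transfer, rather than only while transfers are queued. A rough userspace model of that inactivity window, substituting a monotonic clock for jiffies (names hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define XFER_INACTIVITY_TIMEOUT_MS 10	/* mirrors DBC_XFER_INACTIVITY_TIMEOUT */

static struct timespec xfer_timestamp;

static long ms_since(const struct timespec *then)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - then->tv_sec) * 1000 +
	       (now.tv_nsec - then->tv_nsec) / 1000000;
}

/* Called for EVT_XFER_DONE: remember when the last transfer completed. */
static void note_xfer_done(void)
{
	clock_gettime(CLOCK_MONOTONIC, &xfer_timestamp);
}

/* 0 means busy-poll; otherwise fall back to the slow default rate. */
static int pick_poll_interval(bool out_pending, int slow_ms)
{
	if (out_pending ||
	    ms_since(&xfer_timestamp) < XFER_INACTIVITY_TIMEOUT_MS)
		return 0;
	return slow_ms;
}

int main(void)
{
	note_xfer_done();
	printf("poll interval: %d ms\n", pick_poll_interval(false, 64));
	return 0;
}
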
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 69c278b64084..71e4c4ca6ad5 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1878,9 +1878,10 @@ int xhci_bus_resume(struct usb_hcd *hcd)
int max_ports, port_index;
int sret;
u32 next_state;
- u32 temp, portsc;
+ u32 portsc;
struct xhci_hub *rhub;
struct xhci_port **ports;
+ bool disabled_irq = false;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
@@ -1896,17 +1897,20 @@ int xhci_bus_resume(struct usb_hcd *hcd)
return -ESHUTDOWN;
}
- /* delay the irqs */
- temp = readl(&xhci->op_regs->command);
- temp &= ~CMD_EIE;
- writel(temp, &xhci->op_regs->command);
-
/* bus specific resume for ports we suspended at bus_suspend */
- if (hcd->speed >= HCD_USB3)
+ if (hcd->speed >= HCD_USB3) {
next_state = XDEV_U0;
- else
+ } else {
next_state = XDEV_RESUME;
-
+ if (bus_state->bus_suspended) {
+ /*
+ * prevent port event interrupts from interfering
+ * with usb2 port resume process
+ */
+ xhci_disable_interrupter(xhci->interrupters[0]);
+ disabled_irq = true;
+ }
+ }
port_index = max_ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
@@ -1974,11 +1978,9 @@ int xhci_bus_resume(struct usb_hcd *hcd)
(void) readl(&xhci->op_regs->command);
bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
- /* re-enable irqs */
- temp = readl(&xhci->op_regs->command);
- temp |= CMD_EIE;
- writel(temp, &xhci->op_regs->command);
- temp = readl(&xhci->op_regs->command);
+ /* re-enable interrupter */
+ if (disabled_irq)
+ xhci_enable_interrupter(xhci->interrupters[0]);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9aa7e2a876ec..d698095fc88d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1953,7 +1953,6 @@ no_bw:
xhci->interrupters = NULL;
xhci->page_size = 0;
- xhci->page_shift = 0;
xhci->usb2_rhub.bus_state.bus_suspended = 0;
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
@@ -2372,6 +2371,22 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fall back to a 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
struct xhci_interrupter *ir;
@@ -2379,7 +2394,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
dma_addr_t dma;
unsigned int val, val2;
u64 val_64;
- u32 page_size, temp;
+ u32 temp;
int i;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2388,20 +2403,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
init_completion(&xhci->cmd_ring_stop_completion);
- page_size = readl(&xhci->op_regs->page_size);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size register = 0x%x", page_size);
- val = ffs(page_size) - 1;
- if (val < 16)
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size of %iK", (1 << (val + 12)) / 1024);
- else
- xhci_warn(xhci, "WARN: no supported page size\n");
- /* Use 4K pages, since that's common and the minimum the HC supports */
- xhci->page_shift = 12;
- xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "HCD page size set to %iK", xhci->page_size / 1024);
+ xhci_hcd_page_size(xhci);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
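
The new xhci_hcd_page_size() above decodes the low 16 bits of the PAGE_SIZE register, where a set bit n advertises 2^(n+12)-byte pages; the spec allows exactly one bit, so a value that is not a power of two is treated as invalid and 4K (bit 0) is assumed. A standalone model of the decode, assuming that register layout:

#include <stdint.h>
#include <stdio.h>

#define XHCI_PAGE_SIZE_MASK 0xffff

static uint32_t decode_hcd_page_size(uint32_t reg)
{
	uint32_t page_size = reg & XHCI_PAGE_SIZE_MASK;

	/* exactly one bit may be set; fall back to 4K (bit 0) otherwise */
	if (page_size == 0 || (page_size & (page_size - 1)))
		page_size = 1;

	return page_size << 12;	/* bit 0 => 4096, bit 1 => 8192, ... */
}

int main(void)
{
	printf("0x0001 -> %u bytes\n", decode_hcd_page_size(0x0001)); /* 4096 */
	printf("0x0003 -> %u bytes\n", decode_hcd_page_size(0x0003)); /* invalid, 4096 */
	return 0;
}
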
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 87f1597a0e5a..257e4d79971f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
-
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- /* Without reset on resume, the HC won't work at all */
- xhci->quirks |= XHCI_RESET_ON_RESUME;
-
- return 0;
-}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..9d26e22c4842 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,16 +12,10 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
-
-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
#endif
#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d85ffa9ffaa7..ff813dca2d1d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
- .init_quirk = xhci_mvebu_a3700_init_quirk,
+ .quirks = XHCI_RESET_ON_RESUME,
};
static const struct xhci_plat_priv xhci_plat_brcm = {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5e89e9cdcec2..073b0acd8a74 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1164,7 +1164,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
*/
switch (GET_EP_CTX_STATE(ep_ctx)) {
case EP_STATE_HALTED:
- xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ xhci_dbg(xhci, "Stop ep completion raced with stall\n");
+ /*
+ * If the halt happened before Stop Endpoint failed, its transfer event
+ * should have already been handled and Reset Endpoint should be pending.
+ */
+ if (ep->ep_state & EP_HALTED)
+ goto reset_done;
+
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
@@ -1175,8 +1182,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
}
/* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
+ xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
if (err)
break;
+reset_done:
+ /* Reset EP handler will clean up cancelled TDs */
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_STOPPED:
@@ -1198,16 +1208,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again, on
- * chips where this is known to help. Wait for 100ms.
+ * Keep retrying until the EP starts and stops again.
*/
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
+ /*
+ * Don't retry forever if we guessed wrong or a defective HC never starts
+ * the EP or says 'Running' but fails the command. We must give back TDs.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -2644,6 +2657,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
return 0;
}
+static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ switch (ring->old_trb_comp_code) {
+ case COMP_SHORT_PACKET:
+ return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return xhci->quirks & XHCI_ETRON_HOST &&
+ ring->type == TYPE_ISOC;
+ default:
+ return false;
+ }
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2664,6 +2693,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ bool ring_xrun_event = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2697,8 +2727,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
- slot_id, ep_index, ep_ring->last_td_was_short);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
+ slot_id, ep_index, ep_ring->old_trb_comp_code);
}
break;
case COMP_SHORT_PACKET:
@@ -2770,14 +2800,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_MISSED_SERVICE_ERROR:
/*
* When encounter missed service error, one or more isoc tds
@@ -2789,7 +2817,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci,
"Miss service interval error for slot %u ep %u, set skip flag\n",
slot_id, ep_index);
- return 0;
+ break;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
@@ -2837,6 +2865,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
+ /* Missed TDs will be skipped on the next event */
+ if (trb_comp_code == COMP_MISSED_SERVICE_ERROR)
+ return 0;
+
if (list_empty(&ep_ring->td_list)) {
/*
* Don't print warnings if ring is empty due to a stopped endpoint generating an
@@ -2846,7 +2878,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (trb_comp_code != COMP_STOPPED &&
trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
- !ep_ring->last_td_was_short) {
+ !ring_xrun_event &&
+ !xhci_spurious_success_tx_event(xhci, ep_ring)) {
xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
slot_id, ep_index);
}
@@ -2880,6 +2913,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto check_endpoint_halted;
}
+ /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
+ if (ring_xrun_event)
+ return 0;
+
/*
* Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
* TD pointed by 'ep_ring->dequeue' because that the hardware dequeue
@@ -2894,11 +2931,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/*
* Some hosts give a spurious success event after a short
- * transfer. Ignore it.
+ * transfer or error on last TRB. Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
+ if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
+ &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
+ ep_ring->old_trb_comp_code = 0;
return 0;
}
@@ -2926,10 +2964,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
} while (ep->skip);
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
+ ep_ring->old_trb_comp_code = trb_comp_code;
+
+ /* Get out if a TD was queued at enqueue after the xrun occurred */
+ if (ring_xrun_event)
+ return 0;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
@@ -3780,7 +3819,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* enqueue a No Op TRB; this prevents the Setup and Data Stage
* TRBs from being split by the Link TRB.
*/
- if (trb_is_link(ep_ring->enqueue + 1)) {
+ if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) {
field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
queue_trb(xhci, ep_ring, false, 0, 0,
TRB_INTR_TARGET(0), field);
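
The last hunk above swaps trb_is_link(ep_ring->enqueue + 1) for last_trb_on_seg(): enqueue + 1 can point at a TRB whose contents have not been written yet, so checking its position in the segment is reliable where checking its type field is not. A minimal model of the position check, assuming the kernel's fixed-size segment layout:

#include <stdbool.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 256	/* matches the kernel's segment size */

struct trb { unsigned int field[4]; };
struct seg { struct trb trbs[TRBS_PER_SEGMENT]; };

/*
 * Position-based check: is @trb the final entry of @seg's TRB array?
 * Valid even for an "enqueue + 1" pointer whose TRB contents have not
 * been written yet, which a type-based trb_is_link() check is not.
 */
static bool last_trb_on_seg(const struct seg *seg, const struct trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

int main(void)
{
	static struct seg s;

	printf("%d\n", last_trb_on_seg(&s, &s.trbs[254]));	/* 0 */
	printf("%d\n", last_trb_on_seg(&s, &s.trbs[255]));	/* 1 */
	return 0;
}
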
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 22dc86fb5254..70ec36e4ff5f 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
+ pm_runtime_get_sync(tegra->dev);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
tegra_xhci_set_port_power(tegra, true, true);
+ pm_runtime_mark_last_busy(tegra->dev);
} else {
if (tegra->otg_usb3_port >= 0)
@@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra_xhci_set_port_power(tegra, true, false);
}
+ pm_runtime_put_autosuspend(tegra->dev);
}
#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
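
The tegra hunks above hold a runtime-PM reference across the whole role switch and mark the device busy on the host path before dropping the reference with the autosuspend variant. A toy model of that get/mark-busy/put-autosuspend idiom (usage counting only; real runtime PM also resumes and suspends the device):

#include <stdio.h>
#include <time.h>

/* Toy usage-count model of the runtime-PM calls in the tegra hunk. */
static int usage_count;
static time_t last_busy;

static void pm_get_sync(void)		{ usage_count++; }
static void pm_mark_last_busy(void)	{ last_busy = time(NULL); }
static void pm_put_autosuspend(void)
{
	/* the device may suspend once the count is 0 and the
	 * autosuspend delay, measured from last_busy, has elapsed */
	usage_count--;
}

static void id_work(int host_mode)
{
	pm_get_sync();	/* keep the controller awake for the whole switch */
	if (host_mode) {
		/* ... switch roles, power on ports ... */
		pm_mark_last_busy();
	} else {
		/* ... switch to device mode ... */
	}
	pm_put_autosuspend();
}

int main(void)
{
	id_work(1);
	printf("usage count after switch: %d\n", usage_count);
	return 0;
}
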
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a90ebc8a30e..72070f7e6a76 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -322,7 +322,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
-static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
@@ -335,7 +335,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir)
return 0;
}
-static int xhci_disable_interrupter(struct xhci_interrupter *ir)
+int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 59c6c1c701b9..7d22617417fe 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -211,6 +211,9 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
+/* bits 15:0 - supported HCD page sizes, bit n means 2^(n+12) bytes */
+#define XHCI_PAGE_SIZE_MASK 0xffff
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -1371,7 +1374,7 @@ struct xhci_ring {
unsigned int num_trbs_free; /* used only by xhci DbC */
unsigned int bounce_buf_len;
enum xhci_ring_type type;
- bool last_td_was_short;
+ u32 old_trb_comp_code;
struct radix_tree_root *trb_address_map;
};
@@ -1514,10 +1517,7 @@ struct xhci_hcd {
u16 max_interrupters;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- /* 4KB min, 128MB max */
- int page_size;
- /* Valid values are 12 to 20, inclusive */
- int page_shift;
+ u32 page_size;
/* MSI-X/MSI vectors */
int nvecs;
/* optional clocks */
@@ -1890,6 +1890,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval);
+int xhci_enable_interrupter(struct xhci_interrupter *ir);
+int xhci_disable_interrupter(struct xhci_interrupter *ir);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c
index 75ac3c6aa92d..f5372dfa241a 100644
--- a/drivers/usb/misc/onboard_usb_dev.c
+++ b/drivers/usb/misc/onboard_usb_dev.c
@@ -569,8 +569,14 @@ static void onboard_dev_usbdev_disconnect(struct usb_device *udev)
}
static const struct usb_device_id onboard_dev_id_table[] = {
- { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */
- { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6500) }, /* CYUSB330x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6502) }, /* CYUSB330x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6503) }, /* CYUSB33{0,1}x 2.0 HUB, Vendor Mode */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB331x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB331x 2.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6507) }, /* CYUSB332x 2.0 HUB, Vendor Mode */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6508) }, /* CYUSB332x 3.0 HUB */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x650a) }, /* CYUSB332x 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9b34e23b7091..6ac7a0a5cf07 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1093,6 +1093,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
{ USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
+ /* Abacus Electrics */
+ { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 52be47d684ea..9acb6f837327 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -443,6 +443,11 @@
#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
/*
+ * Abacus Electrics
+ */
+#define ABACUS_OPTICAL_PROBE_PID 0xf458 /* ABACUS ELECTRICS Optical Probe */
+
+/*
* Oceanic product ids
*/
#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5cd26dac2069..27879cc57536 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb);
/* Sierra Wireless products */
#define SIERRA_VENDOR_ID 0x1199
#define SIERRA_PRODUCT_EM9191 0x90d3
+#define SIERRA_PRODUCT_EM9291 0x90e3
/* UNISOC (Spreadtrum) products */
#define UNISOC_VENDOR_ID 0x1782
@@ -2432,6 +2433,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 010688dd9e49..22579d0d8ab8 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -458,6 +458,8 @@ static int pl2303_detect_type(struct usb_serial *serial)
case 0x605:
case 0x700: /* GR */
case 0x705:
+ case 0x905: /* GT-2AB */
+ case 0x1005: /* GC-Q20 */
return TYPE_HXN;
}
break;
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2c12449ff60c..a0afaf254d12 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -100,6 +100,11 @@ DEVICE(nokia, NOKIA_IDS);
{ USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
DEVICE_N(novatel_gps, NOVATEL_IDS, 3);
+/* OWON electronic test and measurement equipment driver */
+#define OWON_IDS() \
+ { USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */
+DEVICE(owon, OWON_IDS);
+
/* Siemens USB/MPI adapter */
#define SIEMENS_IDS() \
{ USB_DEVICE(0x908, 0x0004) }
@@ -134,6 +139,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&motorola_tetra_device,
&nokia_device,
&novatel_gps_device,
+ &owon_device,
&siemens_mpi_device,
&suunto_device,
&vivopay_device,
@@ -153,6 +159,7 @@ static const struct usb_device_id id_table[] = {
MOTOROLA_TETRA_IDS(),
NOKIA_IDS(),
NOVATEL_IDS(),
+ OWON_IDS(),
SIEMENS_IDS(),
SUUNTO_IDS(),
VIVOPAY_IDS(),
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 576be66ad962..dda610f689b7 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -58,8 +58,8 @@ void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
case INQUIRY: what = "INQUIRY"; break;
case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
case MODE_SELECT: what = "MODE_SELECT"; break;
- case RESERVE: what = "RESERVE"; break;
- case RELEASE: what = "RELEASE"; break;
+ case RESERVE_6: what = "RESERVE"; break;
+ case RELEASE_6: what = "RELEASE"; break;
case COPY: what = "COPY"; break;
case ERASE: what = "ERASE"; break;
case MODE_SENSE: what = "MODE_SENSE"; break;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 1f8c9b16a0fb..1477e31d7763 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Zhihong Zhou <zhouzhihong@greatwall.com.cn> */
+UNUSUAL_DEV(0x0781, 0x55e8, 0x0000, 0x9999,
+ "SanDisk",
+ "",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
"Hiksemi",
@@ -83,6 +90,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160,
+ "ADATA",
+ "Portable HDD CH94",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 9c76c3d0c6cf..67a533e35150 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1052,9 +1052,11 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
partner->usb_mode = USB_MODE_USB3;
}
+ mutex_lock(&port->partner_link_lock);
ret = device_register(&partner->dev);
if (ret) {
dev_err(&port->dev, "failed to register partner (%d)\n", ret);
+ mutex_unlock(&port->partner_link_lock);
put_device(&partner->dev);
return ERR_PTR(ret);
}
@@ -1063,6 +1065,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
typec_partner_link_device(partner, port->usb2_dev);
if (port->usb3_dev)
typec_partner_link_device(partner, port->usb3_dev);
+ mutex_unlock(&port->partner_link_lock);
return partner;
}
@@ -1083,12 +1086,18 @@ void typec_unregister_partner(struct typec_partner *partner)
port = to_typec_port(partner->dev.parent);
- if (port->usb2_dev)
+ mutex_lock(&port->partner_link_lock);
+ if (port->usb2_dev) {
typec_partner_unlink_device(partner, port->usb2_dev);
- if (port->usb3_dev)
+ port->usb2_dev = NULL;
+ }
+ if (port->usb3_dev) {
typec_partner_unlink_device(partner, port->usb3_dev);
+ port->usb3_dev = NULL;
+ }
device_unregister(&partner->dev);
+ mutex_unlock(&port->partner_link_lock);
}
EXPORT_SYMBOL_GPL(typec_unregister_partner);
@@ -2041,10 +2050,11 @@ static struct typec_partner *typec_get_partner(struct typec_port *port)
static void typec_partner_attach(struct typec_connector *con, struct device *dev)
{
struct typec_port *port = container_of(con, struct typec_port, con);
- struct typec_partner *partner = typec_get_partner(port);
+ struct typec_partner *partner;
struct usb_device *udev = to_usb_device(dev);
enum usb_mode usb_mode;
+ mutex_lock(&port->partner_link_lock);
if (udev->speed < USB_SPEED_SUPER) {
usb_mode = USB_MODE_USB2;
port->usb2_dev = dev;
@@ -2053,18 +2063,22 @@ static void typec_partner_attach(struct typec_connector *con, struct device *dev
port->usb3_dev = dev;
}
+ partner = typec_get_partner(port);
if (partner) {
typec_partner_set_usb_mode(partner, usb_mode);
typec_partner_link_device(partner, dev);
put_device(&partner->dev);
}
+ mutex_unlock(&port->partner_link_lock);
}
static void typec_partner_deattach(struct typec_connector *con, struct device *dev)
{
struct typec_port *port = container_of(con, struct typec_port, con);
- struct typec_partner *partner = typec_get_partner(port);
+ struct typec_partner *partner;
+ mutex_lock(&port->partner_link_lock);
+ partner = typec_get_partner(port);
if (partner) {
typec_partner_unlink_device(partner, dev);
put_device(&partner->dev);
@@ -2074,6 +2088,7 @@ static void typec_partner_deattach(struct typec_connector *con, struct device *d
port->usb2_dev = NULL;
else if (port->usb3_dev == dev)
port->usb3_dev = NULL;
+ mutex_unlock(&port->partner_link_lock);
}
/**
@@ -2614,6 +2629,7 @@ struct typec_port *typec_register_port(struct device *parent,
ida_init(&port->mode_ids);
mutex_init(&port->port_type_lock);
+ mutex_init(&port->partner_link_lock);
port->id = id;
port->ops = cap->ops;
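
The class.c hunks above serialize partner registration, unregistration, and device attach/detach on the new partner_link_lock, and clear the cached usb2_dev/usb3_dev pointers under the same lock, so an attach racing with partner removal cannot link against a half-torn-down partner. A compact pthread model of the register-then-link-under-one-lock pattern (names hypothetical):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t link_lock = PTHREAD_MUTEX_INITIALIZER;
static void *partner;	/* registered partner, or NULL */
static void *usb2_dev;	/* device waiting to be linked */

static void register_partner(void *p)
{
	pthread_mutex_lock(&link_lock);
	partner = p;
	if (usb2_dev)
		printf("link %p to partner\n", usb2_dev);
	pthread_mutex_unlock(&link_lock);
}

static void unregister_partner(void)
{
	pthread_mutex_lock(&link_lock);
	if (usb2_dev) {
		printf("unlink %p\n", usb2_dev);
		usb2_dev = NULL;	/* drop the stale pointer under the lock */
	}
	partner = NULL;
	pthread_mutex_unlock(&link_lock);
}

static void attach(void *dev)
{
	pthread_mutex_lock(&link_lock);
	usb2_dev = dev;
	if (partner)
		printf("link %p to partner\n", dev);
	pthread_mutex_unlock(&link_lock);
}

int main(void)
{
	int d;

	attach(&d);
	register_partner(&d);
	unregister_partner();
	return 0;
}
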
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index b3076a24ad2e..db2fe96c48ff 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -59,6 +59,7 @@ struct typec_port {
enum typec_port_type port_type;
enum usb_mode usb_mode;
struct mutex port_type_lock;
+ struct mutex partner_link_lock;
enum typec_orientation orientation;
struct typec_switch *sw;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 62ca4a0ec55b..65d2b9754479 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5965,7 +5965,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
case SNK_TRY_WAIT_DEBOUNCE:
if (!tcpm_port_is_sink(port)) {
port->max_wait = 0;
- tcpm_set_state(port, SRC_TRYWAIT, 0);
+ tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
}
break;
case SRC_TRY_WAIT:
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index c605c8616726..744f0709a40e 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,12 +105,13 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
+ void *data, size_t size)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 420af5139c70..8aae80b457d7 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
u8 cur = 0;
int ret;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override && dp->initialized) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
@@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
schedule_work(&dp->work);
ret = 0;
err_unlock:
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return ret;
}
@@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
u64 command;
int ret = 0;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
@@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
schedule_work(&dp->work);
out_unlock:
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return ret;
}
@@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
int cmd = PD_VDO_CMD(header);
int svdm_version;
- mutex_lock(&dp->con->lock);
+ if (!ucsi_con_mutex_lock(dp->con))
+ return -ENOTCONN;
if (!dp->override && dp->initialized) {
const struct typec_altmode *p = typec_altmode_get_partner(alt);
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return -EOPNOTSUPP;
}
svdm_version = typec_altmode_get_svdm_version(alt);
if (svdm_version < 0) {
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return svdm_version;
}
@@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt,
break;
}
- mutex_unlock(&dp->con->lock);
+ ucsi_con_mutex_unlock(dp->con);
return 0;
}
@@ -296,6 +299,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
if (!dp)
return;
+ cancel_work_sync(&dp->work);
+
dp->data.conf = 0;
dp->data.status = 0;
dp->initialized = false;
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 2a2915b0a645..01ce858a1a2b 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,7 +55,8 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -80,6 +81,13 @@ out_clear_bit:
else
clear_bit(COMMAND_PENDING, &ucsi->flags);
+ if (!ret && cci)
+ ret = ucsi->ops->read_cci(ucsi, cci);
+
+ if (!ret && data &&
+ (*cci & UCSI_CCI_COMMAND_COMPLETE))
+ ret = ucsi->ops->read_message_in(ucsi, data, size);
+
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_sync_control_common);
@@ -95,7 +103,7 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- return ucsi->ops->sync_control(ucsi, ctrl);
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
}
static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
@@ -108,9 +116,7 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
if (size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command);
- if (ucsi->ops->read_cci(ucsi, cci))
- return -EIO;
+ ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
if (*cci & UCSI_CCI_BUSY)
return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
@@ -127,9 +133,6 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
else
err = 0;
- if (!err && data && UCSI_CCI_LENGTH(*cci))
- err = ucsi->ops->read_message_in(ucsi, data, size);
-
/*
* Don't ACK connection change if there was an error.
*/
@@ -1920,6 +1923,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
/**
+ * ucsi_con_mutex_lock - Acquire the connector mutex
+ * @con: The connector interface to lock
+ *
+ * Returns true on success, false if the connector is disconnected
+ */
+bool ucsi_con_mutex_lock(struct ucsi_connector *con)
+{
+ bool mutex_locked = false;
+ bool connected = true;
+
+ while (connected && !mutex_locked) {
+ mutex_locked = mutex_trylock(&con->lock) != 0;
+ connected = UCSI_CONSTAT(con, CONNECTED);
+ if (connected && !mutex_locked)
+ msleep(20);
+ }
+
+ connected = connected && con->partner;
+ if (!connected && mutex_locked)
+ mutex_unlock(&con->lock);
+
+ return connected;
+}
+
+/**
+ * ucsi_con_mutex_unlock - Release the connector mutex
+ * @con: The connector interface to unlock
+ */
+void ucsi_con_mutex_unlock(struct ucsi_connector *con)
+{
+ mutex_unlock(&con->lock);
+}
+
+/**
* ucsi_create - Allocate UCSI instance
* @dev: Device interface to the PPM (Platform Policy Manager)
* @ops: I/O routines
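
The new ucsi_con_mutex_lock() above loops on mutex_trylock() instead of blocking, rechecking the connector's CONNECTED state on every iteration so an alternate-mode operation cannot sleep forever on a connector whose partner is already gone. A rough pthread model of that trylock-or-bail loop (simplified; the real code also rechecks con->partner after acquiring the lock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t con_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool connected = true;	/* models UCSI_CONSTAT(con, CONNECTED) */

/* Keep trying the lock only while the connector is still connected;
 * give up (returning false) once it disconnects. */
static bool con_trylock_while_connected(void)
{
	bool locked = false;

	while (connected && !locked) {
		locked = pthread_mutex_trylock(&con_lock) == 0;
		if (connected && !locked)
			usleep(20 * 1000);	/* the patch sleeps 20 ms */
	}

	if (!connected) {
		if (locked)
			pthread_mutex_unlock(&con_lock);
		return false;
	}
	return true;
}

int main(void)
{
	if (con_trylock_while_connected()) {
		printf("locked while connected\n");
		pthread_mutex_unlock(&con_lock);
	}
	return 0;
}
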
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 28780acc4af2..753888cf9f89 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -79,7 +79,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
struct ucsi_altmode *updated);
@@ -93,6 +94,8 @@ int ucsi_register(struct ucsi *ucsi);
void ucsi_unregister(struct ucsi *ucsi);
void *ucsi_get_drvdata(struct ucsi *ucsi);
void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
+bool ucsi_con_mutex_lock(struct ucsi_connector *con);
+void ucsi_con_mutex_unlock(struct ucsi_connector *con);
void ucsi_connector_change(struct ucsi *ucsi, u8 num);
@@ -429,7 +432,7 @@ struct ucsi_debugfs_entry {
u64 low;
u64 high;
} response;
- u32 status;
+ int status;
struct dentry *dentry;
};
@@ -531,7 +534,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index ac1ebb5d9527..0ac6e5ce4a28 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -128,12 +128,13 @@ static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return ret;
}
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 511dd1b224ae..c1d776c82fc2 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -222,7 +222,6 @@ struct ucsi_ccg {
u16 fw_build;
struct work_struct pm_work;
- u64 last_cmd_sent;
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
@@ -538,9 +537,10 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
* first and then vdo=0x3
*/
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
- struct ucsi_altmode *alt)
+ struct ucsi_altmode *alt,
+ u64 command)
{
- switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
+ switch (UCSI_ALTMODE_OFFSET(command)) {
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
@@ -578,37 +578,11 @@ static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
- struct ucsi_capability *cap;
- struct ucsi_altmode *alt;
spin_lock(&uc->op_lock);
memcpy(val, uc->op_data.message_in, val_len);
spin_unlock(&uc->op_lock);
- switch (UCSI_COMMAND(uc->last_cmd_sent)) {
- case UCSI_GET_CURRENT_CAM:
- if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
- break;
- case UCSI_GET_ALTERNATE_MODES:
- if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
- UCSI_RECIPIENT_SOP) {
- alt = val;
- if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
- ucsi_ccg_nvidia_altmode(uc, alt);
- }
- break;
- case UCSI_GET_CAPABILITY:
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- cap = val;
- cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
- }
- break;
- default:
- break;
- }
- uc->last_cmd_sent = 0;
-
return 0;
}
@@ -628,7 +602,8 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -638,11 +613,9 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
- uc->last_cmd_sent = command;
-
- if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
uc->has_multiple_dp) {
- con_index = (uc->last_cmd_sent >> 16) &
+ con_index = (command >> 16) &
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
@@ -652,7 +625,31 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+
+ switch (UCSI_COMMAND(command)) {
+ case UCSI_GET_CURRENT_CAM:
+ if (uc->has_multiple_dp)
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
+ break;
+ case UCSI_GET_ALTERNATE_MODES:
+ if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
+ struct ucsi_altmode *alt = data;
+
+ if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ ucsi_ccg_nvidia_altmode(uc, alt, command);
+ }
+ break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ struct ucsi_capability *cap = data;
+
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
+ default:
+ break;
+ }
err_put:
pm_runtime_put_sync(uc->dev);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 36099047560d..cccc49a08a1a 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -3884,6 +3884,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
+ /* cpu_to_mlx5vdpa16() below depends on this flag */
+ mvdev->actual_features =
+ (device_features & BIT_ULL(VIRTIO_F_VERSION_1));
ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
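
The mlx5_vnet hunk seeds actual_features with VIRTIO_F_VERSION_1 before the first cpu_to_mlx5vdpa16() call because virtio's 16-bit wire format is little-endian only when VERSION_1 is negotiated, and guest-native for legacy devices. A standalone model of that feature-dependent conversion (ignoring the legacy big-endian-guest case):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian encoding done by hand so the model is host-independent. */
static uint16_t cpu_to_le16_model(uint16_t v)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	uint16_t out;

	memcpy(&out, b, 2);
	return out;
}

/* With VERSION_1 the 16-bit wire format is little-endian; legacy
 * devices use guest-native byte order instead. */
static uint16_t cpu_to_virtio16(bool version_1, uint16_t v)
{
	return version_1 ? cpu_to_le16_model(v) : v;
}

int main(void)
{
	/* on a big-endian host these two differ; on LE they match */
	printf("modern: 0x%04x\n", cpu_to_virtio16(true, 0x1234));
	printf("legacy: 0x%04x\n", cpu_to_virtio16(false, 0x1234));
	return 0;
}
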
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 94142581c98c..14437396d721 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1814,7 +1814,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
cpu_to_le16(PCI_COMMAND_MEMORY);
}
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx ||
+ vdev->pdev->irq == IRQ_NOTCONNECTED)
vconfig[PCI_INTERRUPT_PIN] = 0;
ret = vfio_cap_init(vdev);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 586e49efb81b..b8bfc909fd5d 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -727,15 +727,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
- u8 pin;
-
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
- vdev->nointx || vdev->pdev->is_virtfn)
- return 0;
-
- pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
-
- return pin ? 1 : 0;
+ return vdev->vconfig[PCI_INTERRUPT_PIN] ? 1 : 0;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
@@ -1654,14 +1646,14 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
struct vfio_pci_core_device *vdev = vma->vm_private_data;
- unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+ unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1);
+ unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long pfn = vma_to_pfn(vma) + pgoff;
vm_fault_t ret = VM_FAULT_SIGBUS;
- pfn = vma_to_pfn(vma) + pgoff;
-
- if (order && (pfn & ((1 << order) - 1) ||
- vmf->address & ((PAGE_SIZE << order) - 1) ||
- vmf->address + (PAGE_SIZE << order) > vma->vm_end)) {
+ if (order && (addr < vma->vm_start ||
+ addr + (PAGE_SIZE << order) > vma->vm_end ||
+ pfn & ((1 << order) - 1))) {
ret = VM_FAULT_FALLBACK;
goto out;
}
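
The huge-fault hunk above aligns the faulting address down to the mapping-order boundary and then validates that the aligned range sits inside the VMA and that the target PFN is order-aligned, where the old code rejected any vmf->address that was not itself aligned. A standalone model of those checks, with hypothetical example numbers:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Toy model of the vfio_pci_mmap_huge_fault() alignment checks. */
static bool huge_fault_fits(unsigned long fault_addr, unsigned int order,
			    unsigned long vm_start, unsigned long vm_end,
			    unsigned long base_pfn)
{
	unsigned long addr = fault_addr & ~((PAGE_SIZE << order) - 1);
	unsigned long pgoff = (addr - vm_start) >> PAGE_SHIFT;
	unsigned long pfn = base_pfn + pgoff;

	if (!order)
		return true;	/* single-page faults always fit */

	return addr >= vm_start &&
	       addr + (PAGE_SIZE << order) <= vm_end &&
	       !(pfn & ((1UL << order) - 1));
}

int main(void)
{
	/* PMD-order (512-page) fault in a 4 MiB VMA starting at 2 MiB */
	printf("%d\n", huge_fault_fits(0x300000, 9, 0x200000, 0x600000,
				       0x80000));	/* prints 1 */
	return 0;
}
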
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 8382c5834335..565966351dfa 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -259,7 +259,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
if (!is_irq_none(vdev))
return -EINVAL;
- if (!pdev->irq)
+ if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED)
return -ENODEV;
name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 7aeff435c1d8..38d243d914d0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -571,6 +571,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
int ret;
llnode = llist_del_all(&svq->completion_list);
+
+ mutex_lock(&svq->vq.mutex);
+
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -604,6 +607,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_scsi_release_cmd_res(se_cmd);
}
+ mutex_unlock(&svq->vq.mutex);
+
if (signal)
vhost_signal(&svq->vs->dev, &svq->vq);
}
@@ -630,7 +635,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
tag = sbitmap_get(&svq->scsi_tags);
if (tag < 0) {
- pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
+ pr_warn_once("Guest sent too many cmds. Returning TASK_SET_FULL.\n");
return ERR_PTR(-ENOMEM);
}
@@ -757,7 +762,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
size_t len = iov_iter_count(iter);
unsigned int nbytes = 0;
struct page *page;
- int i;
+ int i, ret;
if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
@@ -770,6 +775,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
page = alloc_page(GFP_KERNEL);
if (!page) {
i--;
+ ret = -ENOMEM;
goto err;
}
@@ -777,8 +783,10 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
sg_set_page(&sg[i], page, nbytes, 0);
if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
- copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
+ copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
+ ret = -EFAULT;
goto err;
+ }
len -= nbytes;
}
@@ -793,7 +801,7 @@ err:
for (; i >= 0; i--)
__free_page(sg_page(&sg[i]));
kfree(cmd->saved_iter_addr);
- return -ENOMEM;
+ return ret;
}
static int
@@ -930,24 +938,69 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
}
static void
-vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq,
- int head, unsigned out)
+vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, u8 status)
{
- struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
+ struct iov_iter iov_iter;
int ret;
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+ rsp.status = status;
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+#define TYPE_IO_CMD 0
+#define TYPE_CTRL_TMF 1
+#define TYPE_CTRL_AN 2
+
+static void
+vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+ struct vhost_virtqueue *vq,
+ struct vhost_scsi_ctx *vc, int type)
+{
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ } rsp;
+ struct iov_iter iov_iter;
+ size_t rsp_size;
+ int ret;
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ if (type == TYPE_IO_CMD) {
+ rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else if (type == TYPE_CTRL_TMF) {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ rsp_size);
+
+ ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
+
+ if (likely(ret == rsp_size))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+ else
+ pr_err("Faulted on virtio scsi type=%d\n", type);
+}
+
static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
struct vhost_scsi_ctx *vc)
@@ -1216,8 +1269,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
exp_data_len + prot_bytes,
data_direction);
if (IS_ERR(cmd)) {
- vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
- PTR_ERR(cmd));
+ ret = PTR_ERR(cmd);
+ vq_err(vq, "vhost_scsi_get_tag failed %dd\n", ret);
goto err;
}
cmd->tvc_vhost = vs;
@@ -1232,9 +1285,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
" %d\n", cmd, exp_data_len, prot_bytes, data_direction);
if (data_direction != DMA_NONE) {
- if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
- &prot_iter, exp_data_len,
- &data_iter))) {
+ ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter,
+ exp_data_len, &data_iter);
+ if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
goto err;
@@ -1254,11 +1307,15 @@ err:
* EINVAL: Invalid response buffer, drop the request
* EIO: Respond with bad target
* EAGAIN: Pending request
+ * ENOMEM: Could not allocate resources for request
*/
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
+ else if (ret == -ENOMEM)
+ vhost_scsi_send_status(vs, vq, &vc,
+ SAM_STAT_TASK_SET_FULL);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
@@ -1297,8 +1354,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
else
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ mutex_lock(&tmf->svq->vq.mutex);
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
+ mutex_unlock(&tmf->svq->vq.mutex);
+
vhost_scsi_release_tmf_res(tmf);
}
@@ -1488,7 +1548,10 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc,
+ v_req.type == VIRTIO_SCSI_T_TMF ?
+ TYPE_CTRL_TMF :
+ TYPE_CTRL_AN);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 3ff1b2a8659e..f9475c14f733 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc, info);
+ region.color = bg;
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
region.width = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index e8b4e8c119b5..07d127110ca4 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1258,7 +1258,7 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
-
+ int fg, bg;
struct fbcon_display *p = &fb_display[vc->vc_num];
u_int y_break;
@@ -1279,16 +1279,18 @@ static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
fbcon_clear_margins(vc, 0);
}
+ fg = get_color(vc, info, vc->vc_video_erase_char, 1);
+ bg = get_color(vc, info, vc->vc_video_erase_char, 0);
/* Split blits that cross physical y_wrap boundary */
y_break = p->vrows - p->yscroll;
if (sy < y_break && sy + height - 1 >= y_break) {
u_int b = y_break - sy;
- ops->clear(vc, info, real_y(p, sy), sx, b, width);
+ ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg);
ops->clear(vc, info, real_y(p, sy + b), sx, height - b,
- width);
+ width, fg, bg);
} else
- ops->clear(vc, info, real_y(p, sy), sx, height, width);
+ ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg);
}
static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index df70ea5ec5b3..4d97e6d8a16a 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -55,7 +55,7 @@ struct fbcon_ops {
void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width);
+ int sx, int height, int width, int fg, int bg);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
const unsigned short *s, int count, int yy, int xx,
int fg, int bg);
@@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info)
return (~(0xfff << max_len)) & 0xff;
}
-static inline int attr_col_ec(int shift, struct vc_data *vc,
- struct fb_info *info, int is_fg)
-{
- int is_mono01;
- int col;
- int fg;
- int bg;
-
- if (!vc)
- return 0;
-
- if (vc->vc_can_do_color)
- return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char)
- : attr_bgcol(shift,vc->vc_video_erase_char);
-
- if (!info)
- return 0;
-
- col = mono_col(info);
- is_mono01 = info->fix.visual == FB_VISUAL_MONO01;
-
- if (attr_reverse(vc->vc_video_erase_char)) {
- fg = is_mono01 ? col : 0;
- bg = is_mono01 ? 0 : col;
- }
- else {
- fg = is_mono01 ? 0 : col;
- bg = is_mono01 ? col : 0;
- }
-
- return is_fg ? fg : bg;
-}
-
-#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
-#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
-
/*
* Scroll Method
*/
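With attr_col_ec() removed, the erase colours are resolved once in __fbcon_clear() via get_color() and handed down to every ->clear() implementation. Below is a hedged standalone model of the mono-console branch the deleted helper implemented; the erase_color() name and flattened parameters are illustrative, not fbcon API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the removed attr_col_ec() mono-console logic. */
    static int erase_color(bool mono01, bool reverse, int col, bool is_fg)
    {
        int fg, bg;

        if (reverse) {
            fg = mono01 ? col : 0;
            bg = mono01 ? 0 : col;
        } else {
            fg = mono01 ? 0 : col;
            bg = mono01 ? col : 0;
        }
        return is_fg ? fg : bg;
    }

    int main(void)
    {
        /* FB_VISUAL_MONO01, non-reversed erase: fg is 0, bg is the colour */
        printf("fg=%d bg=%d\n", erase_color(true, false, 1, true),
               erase_color(true, false, 1, false));
        return 0;
    }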
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index f9b794ff7d39..89ef4ba7e867 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dx = sy * vc->vc_font.height;
region.dy = vyres - ((sx + width) * vc->vc_font.width);
region.height = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 903f6fc174e1..b9dac7940fb7 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dx = vxres - ((sy + height) * vc->vc_font.height);
region.dy = sx * vc->vc_font.width;
region.height = width * vc->vc_font.width;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 594331936fd3..0af7913a2abd 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p, info);
u32 vxres = GETVXRES(ops->p, info);
- region.color = attr_bgcol_ec(bgshift,vc,info);
+ region.color = bg;
region.dy = vyres - ((sy + height) * vc->vc_font.height);
region.dx = vxres - ((sx + width) * vc->vc_font.width);
region.width = width * vc->vc_font.width;
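Each rotated variant recomputes the fill rectangle in framebuffer coordinates; the upside-down case mirrors both axes against the virtual resolution. A small standalone model of that mapping, mirroring the dx/dy arithmetic in ud_clear() above (struct rect and ud_map() are illustrative names):

    #include <stdio.h>

    struct rect { unsigned dx, dy, w, h; };

    /* Console cell rectangle -> framebuffer rectangle, ud rotation. */
    static struct rect ud_map(unsigned sx, unsigned sy, unsigned w, unsigned h,
                              unsigned cw, unsigned ch, /* font cell size */
                              unsigned vxres, unsigned vyres)
    {
        struct rect r = {
            .dx = vxres - (sx + w) * cw, /* mirror horizontally */
            .dy = vyres - (sy + h) * ch, /* mirror vertically */
            .w = w * cw,
            .h = h * ch,
        };
        return r;
    }

    int main(void)
    {
        struct rect r = ud_map(0, 0, 2, 1, 8, 16, 640, 480);
        printf("dx=%u dy=%u w=%u h=%u\n", r.dx, r.dy, r.w, r.h);
        return 0;
    }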
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index eff7ec4da167..d342b90c42b7 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
}
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
- int sx, int height, int width)
+ int sx, int height, int width, int fg, int bg)
{
struct fb_tilerect rect;
- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
- rect.fg = attr_fgcol_ec(fgshift, vc, info);
- rect.bg = attr_bgcol_ec(bgshift, vc, info);
+ rect.fg = fg;
+ rect.bg = bg;
rect.sx = sx;
rect.sy = sy;
rect.width = width;
@@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info,
static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only)
{
- return;
+ unsigned int cw = vc->vc_font.width;
+ unsigned int ch = vc->vc_font.height;
+ unsigned int rw = info->var.xres - (vc->vc_cols*cw);
+ unsigned int bh = info->var.yres - (vc->vc_rows*ch);
+ unsigned int rs = info->var.xres - rw;
+ unsigned int bs = info->var.yres - bh;
+ unsigned int vwt = info->var.xres_virtual / cw;
+ unsigned int vht = info->var.yres_virtual / ch;
+ struct fb_tilerect rect;
+
+ rect.index = vc->vc_video_erase_char &
+ ((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
+ rect.fg = color;
+ rect.bg = color;
+
+ if ((int) rw > 0 && !bottom_only) {
+ rect.sx = (info->var.xoffset + rs + cw - 1) / cw;
+ rect.sy = 0;
+ rect.width = (rw + cw - 1) / cw;
+ rect.height = vht;
+ if (rect.width + rect.sx > vwt)
+ rect.width = vwt - rect.sx;
+ if (rect.sx < vwt)
+ info->tileops->fb_tilefill(info, &rect);
+ }
+
+ if ((int) bh > 0) {
+ rect.sx = info->var.xoffset / cw;
+ rect.sy = (info->var.yoffset + bs) / ch;
+ rect.width = rs / cw;
+ rect.height = (bh + ch - 1) / ch;
+ if (rect.height + rect.sy > vht)
+ rect.height = vht - rect.sy;
+ if (rect.sy < vht)
+ info->tileops->fb_tilefill(info, &rect);
+ }
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
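The new tile_clear_margins() converts leftover pixel margins into whole tiles, rounding up so a partially covered tile is still cleared, then clamps against the virtual tile grid. The rounding step from the (rw + cw - 1) / cw expressions above, in isolation:

    #include <stdio.h>

    /* Round a pixel span up to whole tiles: ceiling division. */
    static unsigned int tiles_ceil(unsigned int pixels, unsigned int tile)
    {
        return (pixels + tile - 1) / tile;
    }

    int main(void)
    {
        /* 1..8 leftover pixels with 8-pixel tiles all need one full tile */
        for (unsigned int rw = 1; rw <= 8; rw++)
            printf("rw=%u -> %u tile(s)\n", rw, tiles_ceil(rw, 8));
        return 0;
    }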
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 5ac8201c3533..b71d15794ce8 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1827,6 +1827,7 @@ static void fsl_diu_remove(struct platform_device *pdev)
int i;
data = dev_get_drvdata(&pdev->dev);
+ device_remove_file(&pdev->dev, &data->dev_attr);
disable_lcdc(&data->fsl_diu_info[0]);
free_irq(data->irq, data->diu_reg);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index ba37665188b5..95d5d7993e5b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -395,6 +395,40 @@ static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
return dev->config->get_vq_affinity(dev, irq_vec);
}
+static void virtio_dev_shutdown(struct device *_d)
+{
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+
+ /*
+ * Stop accesses to or from the device.
+ * We only need to do it if there's a driver - no accesses otherwise.
+ */
+ if (!drv)
+ return;
+
+ /* If the driver has its own shutdown method, use that. */
+ if (drv->shutdown) {
+ drv->shutdown(dev);
+ return;
+ }
+
+ /*
+ * Some devices get wedged if you kick them after they are
+ * reset. Mark all vqs as broken to make sure we don't.
+ */
+ virtio_break_device(dev);
+ /*
+ * Guarantee that any callback will see vq->broken as true.
+ */
+ virtio_synchronize_cbs(dev);
+ /*
+ * As IOMMUs are reset on shutdown, this will block device access to memory.
+ * Some devices get wedged if this happens, so reset to make sure it does not.
+ */
+ dev->config->reset(dev);
+}
+
static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
@@ -403,6 +437,7 @@ static const struct bus_type virtio_bus = {
.probe = virtio_dev_probe,
.remove = virtio_dev_remove,
.irq_get_affinity = virtio_irq_get_affinity,
+ .shutdown = virtio_dev_shutdown,
};
int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
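With the bus-level .shutdown in place, a driver either inherits the default break-and-reset sequence above or supplies its own handler through the virtio_driver hook this change consults. A hedged sketch of a driver opting in; my_virtio_shutdown(), my_probe(), my_remove() and the id_table are illustrative and assumed defined elsewhere:

    /*
     * Sketch only: when drv->shutdown is set, virtio_dev_shutdown()
     * calls it instead of breaking the vqs and resetting the device.
     */
    static void my_virtio_shutdown(struct virtio_device *vdev)
    {
        /* quiesce driver-side activity before the device is torn down */
    }

    static struct virtio_driver my_driver = {
        .driver.name = KBUILD_MODNAME,
        .id_table    = id_table,           /* assumed defined elsewhere */
        .probe       = my_probe,           /* assumed defined elsewhere */
        .remove      = my_remove,          /* assumed defined elsewhere */
        .shutdown    = my_virtio_shutdown,
    };
    module_virtio_driver(my_driver);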
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5eaade757860..d50fe030d825 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -247,7 +247,7 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
sg_init_one(&data_sg, get_data, sizeof(*get_data));
sg_init_one(&result_sg, result, sizeof(*result));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.data_sg = &data_sg;
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
@@ -305,7 +305,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
sg_init_one(&result_sg, data, sizeof(*data));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fdd2d2b07b5a..b784aab66867 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2650,7 +2650,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered)
- vq->event_triggered = false;
+ data_race(vq->event_triggered = false);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
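data_race() only annotates the plain write as an intentional race for KCSAN; it adds no ordering or atomicity. A standalone model with a no-op stand-in for the kernel macro:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * No-op stand-in for the kernel's data_race(): the real macro only
     * marks the access as racing by design; it adds no ordering.
     */
    #define data_race(expr) (expr)

    static bool event_triggered = true;

    int main(void)
    {
        /* mirrors the virtqueue_enable_cb_delayed() hunk above */
        if (event_triggered)
            data_race(event_triggered = false);
        printf("event_triggered=%d\n", event_triggered);
        return 0;
    }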
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index b4773a6aaf8c..837e15701c0e 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -11,21 +11,30 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/watchdog.h>
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+struct aspeed_wdt_scu {
+ const char *compatible;
+ u32 reset_status_reg;
+ u32 wdt_reset_mask;
+ u32 wdt_reset_mask_shift;
+};
struct aspeed_wdt_config {
u32 ext_pulse_width_mask;
u32 irq_shift;
u32 irq_mask;
+ struct aspeed_wdt_scu scu;
};
struct aspeed_wdt {
@@ -39,18 +48,36 @@ static const struct aspeed_wdt_config ast2400_config = {
.ext_pulse_width_mask = 0xff,
.irq_shift = 0,
.irq_mask = 0,
+ .scu = {
+ .compatible = "aspeed,ast2400-scu",
+ .reset_status_reg = 0x3c,
+ .wdt_reset_mask = 0x1,
+ .wdt_reset_mask_shift = 1,
+ },
};
static const struct aspeed_wdt_config ast2500_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 12,
.irq_mask = GENMASK(31, 12),
+ .scu = {
+ .compatible = "aspeed,ast2500-scu",
+ .reset_status_reg = 0x3c,
+ .wdt_reset_mask = 0x1,
+ .wdt_reset_mask_shift = 2,
+ },
};
static const struct aspeed_wdt_config ast2600_config = {
.ext_pulse_width_mask = 0xfffff,
.irq_shift = 0,
.irq_mask = GENMASK(31, 10),
+ .scu = {
+ .compatible = "aspeed,ast2600-scu",
+ .reset_status_reg = 0x74,
+ .wdt_reset_mask = 0xf,
+ .wdt_reset_mask_shift = 16,
+ },
};
static const struct of_device_id aspeed_wdt_of_table[] = {
@@ -213,6 +240,56 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
return 0;
}
+static void aspeed_wdt_update_bootstatus(struct platform_device *pdev,
+ struct aspeed_wdt *wdt)
+{
+ const struct resource *res;
+ struct aspeed_wdt_scu scu = wdt->cfg->scu;
+ struct regmap *scu_base;
+ u32 reset_mask_width;
+ u32 reset_mask_shift;
+ u32 idx = 0;
+ u32 status;
+ int ret;
+
+ if (!of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt")) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ idx = ((intptr_t)wdt->base & 0x00000fff) / (uintptr_t)resource_size(res);
+ }
+
+ scu_base = syscon_regmap_lookup_by_compatible(scu.compatible);
+ if (IS_ERR(scu_base)) {
+ wdt->wdd.bootstatus = WDIOS_UNKNOWN;
+ return;
+ }
+
+ ret = regmap_read(scu_base, scu.reset_status_reg, &status);
+ if (ret) {
+ wdt->wdd.bootstatus = WDIOS_UNKNOWN;
+ return;
+ }
+
+ reset_mask_width = hweight32(scu.wdt_reset_mask);
+ reset_mask_shift = scu.wdt_reset_mask_shift +
+ reset_mask_width * idx;
+
+ if (status & (scu.wdt_reset_mask << reset_mask_shift))
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
+ /* clear wdt reset event flag */
+ if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2400-wdt") ||
+ of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-wdt")) {
+ ret = regmap_read(scu_base, scu.reset_status_reg, &status);
+ if (!ret) {
+ status &= ~(scu.wdt_reset_mask << reset_mask_shift);
+ regmap_write(scu_base, scu.reset_status_reg, status);
+ }
+ } else {
+ regmap_write(scu_base, scu.reset_status_reg,
+ scu.wdt_reset_mask << reset_mask_shift);
+ }
+}
+
/* access_cs0 shows if cs0 is accessible, hence the reverted bit */
static ssize_t access_cs0_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -458,10 +535,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
}
+ aspeed_wdt_update_bootstatus(pdev, wdt);
+
status = readl(wdt->base + WDT_TIMEOUT_STATUS);
if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
- wdt->wdd.bootstatus = WDIOF_CARDRESET;
-
if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
of_device_is_compatible(np, "aspeed,ast2500-wdt"))
wdt->wdd.groups = bswitch_groups;
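aspeed_wdt_update_bootstatus() locates this instance's reset-status bits by scaling the per-SoC mask: the shift grows by hweight32(mask) bits per watchdog index derived from the MMIO offset. The arithmetic in isolation, using the ast2600 values from the table above:

    #include <stdio.h>

    /* Portable popcount, standing in for the kernel's hweight32(). */
    static unsigned int hweight32(unsigned int w)
    {
        unsigned int n = 0;
        for (; w; w &= w - 1)
            n++;
        return n;
    }

    int main(void)
    {
        unsigned int wdt_reset_mask = 0xf; /* ast2600 */
        unsigned int shift_base = 16;      /* ast2600 wdt_reset_mask_shift */

        for (unsigned int idx = 0; idx < 4; idx++) {
            unsigned int shift = shift_base + hweight32(wdt_reset_mask) * idx;
            printf("wdt%u status mask %#x\n", idx, wdt_reset_mask << shift);
        }
        return 0;
    }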
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 30450e99e5e9..bdd81d8074b2 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -72,6 +72,8 @@
#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25
#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24
+#define EXYNOSAUTOV920_CLUSTER0_WDTRESET_BIT 0
+#define EXYNOSAUTOV920_CLUSTER1_WDTRESET_BIT 1
#define GS_CLUSTER0_NONCPU_OUT 0x1220
#define GS_CLUSTER1_NONCPU_OUT 0x1420
@@ -312,9 +314,9 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl0 = {
.mask_bit = 2,
.mask_reset_inv = true,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
- .rst_stat_bit = EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT,
+ .rst_stat_bit = EXYNOSAUTOV920_CLUSTER0_WDTRESET_BIT,
.cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT,
- .cnt_en_bit = 7,
+ .cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
QUIRK_HAS_DBGACK_BIT,
@@ -325,9 +327,9 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl1 = {
.mask_bit = 2,
.mask_reset_inv = true,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
- .rst_stat_bit = EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT,
+ .rst_stat_bit = EXYNOSAUTOV920_CLUSTER1_WDTRESET_BIT,
.cnt_en_reg = EXYNOSAUTOV920_CLUSTER1_NONCPU_OUT,
- .cnt_en_bit = 7,
+ .cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
QUIRK_HAS_DBGACK_BIT,
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f7d6f47971fd..24f485827e03 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -278,7 +278,7 @@ config XEN_PRIVCMD_EVENTFD
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
- depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
This ACPI processor uploads Power Management information to the Xen
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 416f231809cb..bfe07adb3e3a 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
pci_mcfg_reserved = true;
}
#endif
+
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values; do not attempt to register devices with Xen in
+ * segments greater than or equal to 0x10000.
+ */
+ dev_info(dev,
+ "not registering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values.
+ */
+ dev_info(dev,
+ "not unregistering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
struct physdev_pci_device device = {
.seg = pci_domain_nr(pci_dev->bus),
@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
.flags = PCI_DEVICE_RESET_FLR,
};
+ if (pci_domain_nr(dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16-bit PCI segment
+ * values.
+ */
+ dev_info(&dev->dev,
+ "unable to notify Xen of device reset: invalid PCI segment\n");
+ return 0;
+ }
+
return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
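All three xen/pci.c paths now bail out early for PCI segments that need more than 16 bits, since the physdev hypercall ABI cannot express them. The check reduced to a standalone predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* The hypercall ABI carries the PCI segment in a 16-bit field. */
    static bool xen_segment_ok(unsigned int pci_domain_nr)
    {
        return !(pci_domain_nr >> 16);
    }

    int main(void)
    {
        printf("0xffff ok=%d, 0x10000 ok=%d\n",
               xen_segment_ok(0xffff), xen_segment_ok(0x10000));
        return 0;
    }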
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 544d3f9010b9..1db82da56db6 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -26,6 +26,8 @@
#define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002
+
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
@@ -174,6 +176,8 @@ pci_out:
static const struct pci_device_id platform_pci_tbl[] = {
{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1f65795cf5d7..ef56a2500ed6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -217,6 +217,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size, true) &&
+ !dma_kmalloc_needs_bounce(dev, size, dir) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
!is_swiotlb_force_bounce(dev))
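The swiotlb-xen map path gains one more veto before using the device address directly: only if every check passes is the bounce buffer skipped. A hedged model of the decision; the struct fields are stand-ins for the kernel helpers named in the condition above:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in flags for the kernel checks in xen_swiotlb_map_page(). */
    struct map_checks {
        bool dma_capable;
        bool kmalloc_needs_bounce; /* the check added by this change */
        bool straddles_page;
        bool arch_needs_swiotlb;
        bool force_bounce;
    };

    static bool can_map_directly(const struct map_checks *c)
    {
        return c->dma_capable && !c->kmalloc_needs_bounce &&
               !c->straddles_page && !c->arch_needs_swiotlb &&
               !c->force_bounce;
    }

    int main(void)
    {
        struct map_checks c = { .dma_capable = true,
                                .kmalloc_needs_bounce = true };
        printf("direct=%d\n", can_map_directly(&c)); /* 0: must bounce */
        return 0;
    }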
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 13821e7e825e..9ac0427724a3 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -77,6 +77,7 @@ enum xb_req_state {
struct xb_req_data {
struct list_head list;
wait_queue_head_t wq;
+ struct kref kref;
struct xsd_sockmsg msg;
uint32_t caller_req_id;
enum xsd_sockmsg_type type;
@@ -103,6 +104,7 @@ int xb_init_comms(void);
void xb_deinit_comms(void);
int xs_watch_msg(struct xs_watch_event *event);
void xs_request_exit(struct xb_req_data *req);
+void xs_free_req(struct kref *kref);
int xenbus_match(struct device *_dev, const struct device_driver *_drv);
int xenbus_dev_probe(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index e5fda0256feb..82df2da1b880 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -309,8 +309,8 @@ static int process_msg(void)
virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
- } else
- kfree(req);
+ }
+ kref_put(&req->kref, xs_free_req);
}
mutex_unlock(&xs_response_mutex);
@@ -386,14 +386,13 @@ static int process_writes(void)
state.req->msg.type = XS_ERROR;
state.req->err = err;
list_del(&state.req->list);
- if (state.req->state == xb_req_state_aborted)
- kfree(state.req);
- else {
+ if (state.req->state != xb_req_state_aborted) {
/* write err, then update state */
virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
+ kref_put(&state.req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 46f8916597e5..f5c21ba64df5 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
mutex_unlock(&u->reply_mutex);
kfree(req->body);
- kfree(req);
+ kref_put(&req->kref, xs_free_req);
kref_put(&u->kref, xenbus_file_free);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 6d32ffb01136..86fe6e779056 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -966,9 +966,15 @@ static int __init xenbus_init(void)
if (xen_pv_domain())
xen_store_domain_type = XS_PV;
if (xen_hvm_domain())
+ {
xen_store_domain_type = XS_HVM;
- if (xen_hvm_domain() && xen_initial_domain())
- xen_store_domain_type = XS_LOCAL;
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (err)
+ goto out_error;
+ xen_store_evtchn = (int)v;
+ if (!v && xen_initial_domain())
+ xen_store_domain_type = XS_LOCAL;
+ }
if (xen_pv_domain() && !xen_start_info->store_evtchn)
xen_store_domain_type = XS_LOCAL;
if (xen_pv_domain() && xen_start_info->store_evtchn)
@@ -987,10 +993,6 @@ static int __init xenbus_init(void)
xen_store_interface = gfn_to_virt(xen_store_gfn);
break;
case XS_HVM:
- err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
- if (err)
- goto out_error;
- xen_store_evtchn = (int)v;
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
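For HVM and PVH domains the store event channel parameter is now read before the domain is classified, so an initial domain with a valid event channel stays XS_HVM and only falls back to XS_LOCAL when the parameter reads zero. The decision reduced to a standalone function (enum values illustrative):

    #include <stdio.h>

    enum xs_type { XS_UNKNOWN, XS_LOCAL, XS_PV, XS_HVM };

    /* Modelled on the reordered xenbus_init() logic above. */
    static enum xs_type classify_hvm(unsigned long store_evtchn,
                                     int initial_domain)
    {
        if (!store_evtchn && initial_domain)
            return XS_LOCAL; /* PVH dom0 without a xenstore event channel */
        return XS_HVM;
    }

    int main(void)
    {
        printf("evtchn=0: %d, evtchn=5: %d\n",
               classify_hvm(0, 1), classify_hvm(5, 1));
        return 0;
    }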
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index d32c726f7a12..dcf9182c8451 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -112,6 +112,12 @@ static void xs_suspend_exit(void)
wake_up_all(&xs_state_enter_wq);
}
+void xs_free_req(struct kref *kref)
+{
+ struct xb_req_data *req = container_of(kref, struct xb_req_data, kref);
+ kfree(req);
+}
+
static uint32_t xs_request_enter(struct xb_req_data *req)
{
uint32_t rq_id;
@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req->caller_req_id = req->msg.req_id;
req->msg.req_id = xs_request_enter(req);
+ /*
+ * Take a second reference: one for this thread and one for the
+ * xenbus_thread.
+ */
+ kref_get(&req->kref);
+
mutex_lock(&xb_write_mutex);
list_add_tail(&req->list, &xb_write_list);
notify = list_is_singular(&xb_write_list);
@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
if (req->state == xb_req_state_queued ||
req->state == xb_req_state_wait_reply)
req->state = xb_req_state_aborted;
- else
- kfree(req);
+
+ kref_put(&req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);
return ret;
@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
req->cb = xenbus_dev_queue_reply;
req->par = par;
req->user_req = true;
+ kref_init(&req->kref);
xs_send(req, msg);
@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
req->user_req = false;
+ kref_init(&req->kref);
msg.req_id = 0;
msg.tx_id = t.id;
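The xenbus conversion replaces the conditional kfree() calls with reference counting: kref_init() in the submitters gives them one reference, xs_send() takes a second for the xenbus thread, and each side drops its own with kref_put(..., xs_free_req), so the request is freed exactly once no matter which side finishes last. A minimal userspace model of the two-reference lifetime:

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace model of the xb_req_data lifetime; not kref itself. */
    struct req {
        int refs;
        /* ... message payload ... */
    };

    static void req_put(struct req *r)
    {
        if (--r->refs == 0) { /* kref_put(..., xs_free_req) */
            printf("freeing request\n");
            free(r);
        }
    }

    int main(void)
    {
        struct req *r = malloc(sizeof(*r));
        r->refs = 1;  /* kref_init(): submitter's reference */
        r->refs++;    /* kref_get() in xs_send(): xenbus thread's ref */
        req_put(r);   /* xenbus thread done (process_msg/process_writes) */
        req_put(r);   /* submitter done (xs_wait_for_reply) */
        return 0;
    }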